[Debian-ha-commits] [pcs] 01/05: New upstream version 0.9.159

Valentin Vidic vvidic-guest at moszumanska.debian.org
Fri Jun 30 16:37:14 UTC 2017


This is an automated email from the git hooks/post-receive script.

vvidic-guest pushed a commit to branch master
in repository pcs.

commit 7cb40a326a5a10e6061986b83916b57f7fd9146a
Author: Valentin Vidic <Valentin.Vidic at CARNet.hr>
Date:   Fri Jun 30 17:54:55 2017 +0200

    New upstream version 0.9.159
---
 CHANGELOG.md                                       |  56 ++
 pcs/cli/cluster/command.py                         |  10 +-
 pcs/cli/common/console_report.py                   |  39 +-
 pcs/cli/common/lib_wrapper.py                      |   6 +-
 pcs/cli/common/parse_args.py                       |   4 +-
 pcs/cli/common/test/test_console_report.py         | 582 ++++++++++++++++++++-
 pcs/cli/resource/parse_args.py                     |   8 +-
 pcs/cli/resource/test/test_parse_args.py           | 100 +++-
 pcs/cluster.py                                     |  75 ++-
 pcs/common/report_codes.py                         |   1 +
 pcs/config.py                                      |   4 +-
 pcs/lib/booth/env.py                               |   4 +-
 pcs/lib/cib/nvpair.py                              |  12 +-
 pcs/lib/cib/resource/bundle.py                     |  17 +-
 pcs/lib/cib/resource/common.py                     |  40 +-
 pcs/lib/cib/resource/operations.py                 |  25 +-
 pcs/lib/cib/resource/primitive.py                  |   6 +-
 pcs/lib/cib/test/test_nvpair.py                    |  42 ++
 pcs/lib/cib/test/test_resource_common.py           |  16 +-
 pcs/lib/cib/test/test_resource_operations.py       |  22 +
 pcs/lib/cib/tools.py                               |  10 +-
 pcs/lib/commands/cluster.py                        | 108 ++--
 pcs/lib/commands/resource.py                       | 176 +++++--
 pcs/lib/commands/stonith.py                        |  17 +-
 pcs/lib/commands/test/resource/fixture.py          |   2 +-
 .../commands/test/resource/test_bundle_create.py   | 179 ++++++-
 .../commands/test/resource/test_bundle_update.py   | 102 +++-
 .../test/resource/test_resource_enable_disable.py  |  93 +++-
 .../test/resource/test_resource_manage_unmanage.py | 189 ++++++-
 pcs/lib/commands/test/test_stonith_agent.py        | 113 +++-
 pcs/lib/env.py                                     |   7 +-
 pcs/lib/nodes_task.py                              |  25 +-
 pcs/lib/pacemaker/state.py                         |  40 +-
 pcs/lib/pacemaker/test/test_state.py               | 108 +++-
 pcs/lib/reports.py                                 |  24 +
 pcs/lib/resource_agent.py                          |  89 +++-
 pcs/lib/test/test_nodes_task.py                    |   4 -
 pcs/lib/test/test_resource_agent.py                |  69 ++-
 pcs/lib/tools.py                                   |   5 +-
 pcs/pcs.8                                          |  20 +-
 pcs/pcsd.py                                        |  12 +-
 pcs/resource.py                                    | 188 +++++--
 pcs/settings_default.py                            |   2 +-
 pcs/status.py                                      |  67 ++-
 pcs/test/cib_resource/test_bundle.py               | 132 ++++-
 pcs/test/cib_resource/test_create.py               |  49 +-
 pcs/test/cib_resource/test_manage_unmanage.py      |   5 +-
 pcs/test/cib_resource/test_operation_add.py        |  36 +-
 pcs/test/cib_resource/test_stonith_create.py       |  59 +++
 pcs/test/test_cluster.py                           | 128 +++++
 pcs/test/test_cluster_pcmk_remote.py               |  16 +-
 pcs/test/test_constraints.py                       |   2 +-
 pcs/test/test_resource.py                          | 217 +++++---
 pcs/test/test_status.py                            |  89 ++++
 pcs/test/test_stonith.py                           | 139 ++++-
 pcs/test/tools/assertions.py                       |  90 +++-
 pcs/usage.py                                       |  33 +-
 pcs/utils.py                                       |  37 +-
 pcsd/bootstrap.rb                                  |   2 +-
 pcsd/pcs.rb                                        |  23 +-
 pcsd/pcsd.8                                        |   2 +-
 pcsd/pcsd.rb                                       |   3 +-
 pcsd/public/js/nodes-ember.js                      |   3 +-
 pcsd/public/js/pcsd.js                             |  14 +-
 pcsd/remote.rb                                     |  17 +-
 pcsd/ssl.rb                                        |   2 +
 pcsd/views/main.erb                                |   4 +
 pcsd/views/manage.erb                              |  12 +
 setup.py                                           |   2 +-
 69 files changed, 3240 insertions(+), 594 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 1c15a01..bc7c8e5 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,61 @@
 # Change Log
 
+## [0.9.159] - 2017-06-30
+
+### Added
+- Option to create a cluster with or without corosync encryption enabled;
+  the encryption is disabled by default ([rhbz#1165821])
+- It is now possible to disable, enable, unmanage and manage bundle resources
+  and set their meta attributes ([rhbz#1447910])
+- Pcs now warns against using the `action` option of stonith devices
+  ([rhbz#1421702])
+
+### Fixed
+- Fixed crash of the `pcs cluster setup` command when the `--force` flag was
+  used ([rhbz#1176018])
+- Fixed crash of the `pcs cluster destroy --all` command when the cluster was
+  not running ([rhbz#1176018])
+- Fixed crash of the `pcs config restore` command when restoring pacemaker
+  authkey ([rhbz#1176018])
+- Fixed "Error: unable to get cib" when adding a node to a stopped cluster
+  ([rhbz#1176018])
+- Fixed a crash in the `pcs cluster node add-remote` command when an id
+  conflict occurs ([rhbz#1386114])
+- Fixed creating a new cluster from the web UI ([rhbz#1284404])
+- `pcs cluster node add-guest` now works with the flag `--skip-offline`
+  ([rhbz#1176018])
+- `pcs cluster node remove-guest` can be run again when the guest node was
+  unreachable the first time ([rhbz#1176018])
+- Fixed "Error: Unable to read /etc/corosync/corosync.conf" when running
+  `pcs resource create` ([rhbz#1386114])
+- It is now possible to set `debug` and `verbose` parameters of stonith devices
+  ([rhbz#1432283])
+- Resource operation ids are now properly validated and no longer ignored in
+  `pcs resource create`, `pcs resource update` and `pcs resource op add`
+  commands ([rhbz#1443418])
+- Flag `--force` works correctly when an operation is not successful on some
+  nodes during `pcs cluster node add-remote` or `pcs cluster node add-guest`
+  ([rhbz#1464781])
+
+### Changed
+- The corosync authkey is now stored as binary data ([rhbz#1165821])
+- It is now mandatory to specify a container type in the `resource bundle create`
+  command
+- When creating a new cluster, corosync communication encryption is disabled
+  by default (in 0.9.158 it was enabled by default, in 0.9.157 and older it was
+  disabled)
+
+[rhbz#1165821]: https://bugzilla.redhat.com/show_bug.cgi?id=1165821
+[rhbz#1176018]: https://bugzilla.redhat.com/show_bug.cgi?id=1176018
+[rhbz#1284404]: https://bugzilla.redhat.com/show_bug.cgi?id=1284404
+[rhbz#1386114]: https://bugzilla.redhat.com/show_bug.cgi?id=1386114
+[rhbz#1421702]: https://bugzilla.redhat.com/show_bug.cgi?id=1421702
+[rhbz#1432283]: https://bugzilla.redhat.com/show_bug.cgi?id=1432283
+[rhbz#1443418]: https://bugzilla.redhat.com/show_bug.cgi?id=1443418
+[rhbz#1447910]: https://bugzilla.redhat.com/show_bug.cgi?id=1447910
+[rhbz#1464781]: https://bugzilla.redhat.com/show_bug.cgi?id=1464781
+
+
 ## [0.9.158] - 2017-05-23
 
 ### Added
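
The corosync encryption option described in the changelog above is exposed as a
new `--encryption` command line modifier (see the parse_args.py and cluster.py
hunks further below); it accepts the values 0 and 1 and, per the changelog,
defaults to disabled. A hedged usage sketch, with cluster and node names as
placeholders:

    # create a cluster with corosync encryption enabled
    pcs cluster setup --name mycluster node1 node2 --encryption 1

    # 0.9.159 default: encryption disabled, equivalent to --encryption 0
    pcs cluster setup --name mycluster node1 node2
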
diff --git a/pcs/cli/cluster/command.py b/pcs/cli/cluster/command.py
index f725326..d3c83cd 100644
--- a/pcs/cli/cluster/command.py
+++ b/pcs/cli/cluster/command.py
@@ -42,6 +42,7 @@ def node_add_remote(lib, arg_list, modifiers):
         parts["op"],
         parts["meta"],
         parts["options"],
+        skip_offline_nodes=modifiers["skip_offline_nodes"],
         allow_incomplete_distribution=force,
         allow_pacemaker_remote_service_fail=force,
         allow_invalid_operation=force,
@@ -57,6 +58,7 @@ def create_node_remove_remote(remove_resource):
         lib.cluster.node_remove_remote(
             arg_list[0],
             remove_resource,
+            skip_offline_nodes=modifiers["skip_offline_nodes"],
             allow_remove_multiple_nodes=modifiers["force"],
             allow_pacemaker_remote_service_fail=modifiers["force"],
         )
@@ -71,14 +73,13 @@ def node_add_guest(lib, arg_list, modifiers):
     resource_id = arg_list[1]
     meta_options = prepare_options(arg_list[2:])
 
-    force = modifiers["force"]
-
     lib.cluster.node_add_guest(
         node_name,
         resource_id,
         meta_options,
-        allow_incomplete_distribution=force,
-        allow_pacemaker_remote_service_fail=force,
+        skip_offline_nodes=modifiers["skip_offline_nodes"],
+        allow_incomplete_distribution=modifiers["force"],
+        allow_pacemaker_remote_service_fail=modifiers["force"],
         wait=modifiers["wait"],
     )
 
@@ -88,6 +89,7 @@ def node_remove_guest(lib, arg_list, modifiers):
 
     lib.cluster.node_remove_guest(
         arg_list[0],
+        skip_offline_nodes=modifiers["skip_offline_nodes"],
         allow_remove_multiple_nodes=modifiers["force"],
         allow_pacemaker_remote_service_fail=modifiers["force"],
         wait=modifiers["wait"],
diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
index 793ff8d..c1f03e8 100644
--- a/pcs/cli/common/console_report.py
+++ b/pcs/cli/common/console_report.py
@@ -46,7 +46,7 @@ def format_fencing_level_target(target_type, target_value):
     return target_value
 
 def service_operation_started(operation, info):
-    return "{operation}{service}{instance_suffix}...".format(
+    return "{operation} {service}{instance_suffix}...".format(
         operation=operation,
         instance_suffix=format_optional(info["instance"], INSTANCE_SUFFIX),
         **info
@@ -63,7 +63,7 @@ def service_operation_error(operation, info):
         **info
     )
 
-def service_opration_success(operation, info):
+def service_operation_success(operation, info):
     return "{node_prefix}{service}{instance_suffix} {operation}".format(
         operation=operation,
         instance_suffix=format_optional(info["instance"], INSTANCE_SUFFIX),
@@ -73,7 +73,7 @@ def service_opration_success(operation, info):
 
 def service_operation_skipped(operation, info):
     return (
-        "{node_prefix}not {operation}{service}{instance_suffix} - {reason}"
+        "{node_prefix}not {operation} {service}{instance_suffix}: {reason}"
     ).format(
         operation=operation,
         instance_suffix=format_optional(info["instance"], INSTANCE_SUFFIX),
@@ -205,7 +205,11 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
     codes.INVALID_OPTION: lambda info:
         (
             "invalid {desc}option{s} {option_names_list},"
-            " allowed option{are} {allowed_values}"
+            +
+            (
+                " allowed option{are} {allowed_values}" if info["allowed"]
+                else " there are no options allowed"
+            )
         ).format(
             desc=format_optional(info["option_type"], "{0} "),
             allowed_values=", ".join(sorted(info["allowed"])),
@@ -250,6 +254,23 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
         )
     ,
 
+    codes.DEPRECATED_OPTION: lambda info:
+        (
+            "{desc}option '{option_name}' is deprecated and should not be "
+            "used, use {hint} instead"
+        ).format(
+            desc=format_optional(info["option_type"], "{0} "),
+            hint=(
+                ", ".join(sorted(info["replaced_by"])) if (
+                    isinstance(info["replaced_by"], Iterable)
+                    and
+                    not is_string(info["replaced_by"])
+                ) else info["replaced_by"]
+            ),
+            **info
+        )
+    ,
+
     codes.MUTUALLY_EXCLUSIVE_OPTIONS: lambda info:
         # "{desc}options {option_names} are muttually exclusive".format(
         "Only one of {desc}options {option_names} can be used".format(
@@ -833,18 +854,18 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
 
     codes.SERVICE_START_STARTED: partial(service_operation_started, "Starting"),
     codes.SERVICE_START_ERROR: partial(service_operation_error, "start"),
-    codes.SERVICE_START_SUCCESS: partial(service_opration_success, "started"),
+    codes.SERVICE_START_SUCCESS: partial(service_operation_success, "started"),
     codes.SERVICE_START_SKIPPED: partial(service_operation_skipped, "starting"),
 
     codes.SERVICE_STOP_STARTED: partial(service_operation_started, "Stopping"),
     codes.SERVICE_STOP_ERROR: partial(service_operation_error, "stop"),
-    codes.SERVICE_STOP_SUCCESS: partial(service_opration_success, "stopped"),
+    codes.SERVICE_STOP_SUCCESS: partial(service_operation_success, "stopped"),
 
     codes.SERVICE_ENABLE_STARTED: partial(
         service_operation_started, "Enabling"
     ),
     codes.SERVICE_ENABLE_ERROR: partial(service_operation_error, "enable"),
-    codes.SERVICE_ENABLE_SUCCESS: partial(service_opration_success, "enabled"),
+    codes.SERVICE_ENABLE_SUCCESS: partial(service_operation_success, "enabled"),
     codes.SERVICE_ENABLE_SKIPPED: partial(
         service_operation_skipped, "enabling"
     ),
@@ -853,7 +874,7 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
         partial(service_operation_started, "Disabling")
      ,
     codes.SERVICE_DISABLE_ERROR: partial(service_operation_error, "disable"),
-    codes.SERVICE_DISABLE_SUCCESS: partial(service_opration_success, "disabled"),
+    codes.SERVICE_DISABLE_SUCCESS: partial(service_operation_success, "disabled"),
 
     codes.SERVICE_KILL_ERROR: lambda info:
         "Unable to kill {service_list}: {reason}"
@@ -1192,6 +1213,8 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
         "This command does not support {forbidden_options}"
         .format(
             forbidden_options=joined_list(info["forbidden_options"], {
+                "BOOTH_CONF": "--booth-conf",
+                "BOOTH_KEY": "--booth-key",
                 "CIB": "-f",
                 "COROSYNC_CONF": "--corosync_conf",
             })
diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
index 683ba4d..4d6ed9a 100644
--- a/pcs/cli/common/lib_wrapper.py
+++ b/pcs/cli/common/lib_wrapper.py
@@ -318,7 +318,8 @@ def load_module(env, middleware_factory, name):
         return bind_all(
             env,
             middleware.build(
-                middleware_factory.cib
+                middleware_factory.cib,
+                middleware_factory.corosync_conf_existing,
             ),
             {
                 "bundle_create": resource.bundle_create,
@@ -338,7 +339,8 @@ def load_module(env, middleware_factory, name):
         return bind_all(
             env,
             middleware.build(
-                middleware_factory.cib
+                middleware_factory.cib,
+                middleware_factory.corosync_conf_existing,
             ),
             {
                 "create": stonith.create,
diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
index 465cb96..d72a6d4 100644
--- a/pcs/cli/common/parse_args.py
+++ b/pcs/cli/common/parse_args.py
@@ -17,7 +17,7 @@ PCS_SHORT_OPTIONS = "hf:p:u:V"
 PCS_LONG_OPTIONS = [
     "debug", "version", "help", "fullhelp",
     "force", "skip-offline", "autocorrect", "interactive", "autodelete",
-    "all", "full", "groups", "local", "wait", "config",
+    "all", "full", "groups", "local", "wait", "config", "async",
     "start", "enable", "disabled", "off", "request-timeout=",
     "pacemaker", "corosync",
     "no-default-ops", "defaults", "nodesc",
@@ -32,7 +32,7 @@ PCS_LONG_OPTIONS = [
     "miss_count_const=", "fail_recv_const=",
     "corosync_conf=", "cluster_conf=",
     "booth-conf=", "booth-key=",
-    "remote", "watchdog=", "device=",
+    "remote", "watchdog=", "device=", "encryption=",
     #in pcs status - do not display resorce status on inactive node
     "hide-inactive",
     # pcs resource (un)manage - enable or disable monitor operations
diff --git a/pcs/cli/common/test/test_console_report.py b/pcs/cli/common/test/test_console_report.py
index d80aee1..6d9c280 100644
--- a/pcs/cli/common/test/test_console_report.py
+++ b/pcs/cli/common/test/test_console_report.py
@@ -50,7 +50,7 @@ class BuildInvalidOptionMessageTest(NameBuildTest):
             {
                 "option_names": ["NAME"],
                 "option_type": "TYPE",
-                "allowed": sorted(["FIRST", "SECOND"]),
+                "allowed": ["SECOND", "FIRST"],
             }
         )
 
@@ -60,7 +60,7 @@ class BuildInvalidOptionMessageTest(NameBuildTest):
             {
                 "option_names": ["NAME"],
                 "option_type": "",
-                "allowed": sorted(["FIRST", "SECOND"]),
+                "allowed": ["FIRST", "SECOND"],
             }
         )
 
@@ -74,6 +74,17 @@ class BuildInvalidOptionMessageTest(NameBuildTest):
             }
         )
 
+    def test_no_allowed_options(self):
+        self.assert_message_from_info(
+            "invalid options: 'ANOTHER', 'NAME', there are no options allowed",
+            {
+                "option_names": ["NAME", "ANOTHER"],
+                "option_type": "",
+                "allowed": [],
+            }
+        )
+
+
 class RequiredOptionIsMissing(NameBuildTest):
     code = codes.REQUIRED_OPTION_IS_MISSING
     def test_build_message_with_type(self):
@@ -372,6 +383,35 @@ class InvalidOptionType(NameBuildTest):
             }
         )
 
+
+class DeprecatedOption(NameBuildTest):
+    code = codes.DEPRECATED_OPTION
+
+    def test_no_desc_hint_array(self):
+        self.assert_message_from_info(
+            "option 'option name' is deprecated and should not be used,"
+                " use new_a, new_b instead"
+            ,
+            {
+                "option_name": "option name",
+                "option_type": "",
+                "replaced_by": ["new_b", "new_a"],
+            }
+        )
+
+    def test_desc_hint_string(self):
+        self.assert_message_from_info(
+            "option type option 'option name' is deprecated and should not be"
+                " used, use new option instead"
+            ,
+            {
+                "option_name": "option name",
+                "option_type": "option type",
+                "replaced_by": "new option",
+            }
+        )
+
+
 class StonithResourcesDoNotExist(NameBuildTest):
     code = codes.STONITH_RESOURCES_DO_NOT_EXIST
     def test_success(self):
@@ -1159,3 +1199,541 @@ class NodeToClearIsStillInCluster(NameBuildTest):
                 "node": "node1"
             }
         )
+
+
+class ServiceStartStarted(NameBuildTest):
+    code = codes.SERVICE_START_STARTED
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "Starting a_service...",
+            {
+                "service": "a_service",
+                "instance": None,
+            }
+        )
+
+    def test_with_instance(self):
+        self.assert_message_from_info(
+            "Starting a_service at an_instance...",
+            {
+                "service": "a_service",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceStartError(NameBuildTest):
+    code = codes.SERVICE_START_ERROR
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "Unable to start a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "a_node: Unable to start a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": None,
+            }
+        )
+
+    def test_instance(self):
+        self.assert_message_from_info(
+            "Unable to start a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": "an_instance",
+            }
+        )
+
+    def test_all(self):
+        self.assert_message_from_info(
+            "a_node: Unable to start a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceStartSuccess(NameBuildTest):
+    code = codes.SERVICE_START_SUCCESS
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "a_service started",
+            {
+                "service": "a_service",
+                "node": None,
+                "instance": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "a_node: a_service started",
+            {
+                "service": "a_service",
+                "node": "a_node",
+                "instance": None,
+            }
+        )
+
+    def test_instance(self):
+        self.assert_message_from_info(
+            "a_service at an_instance started",
+            {
+                "service": "a_service",
+                "node": None,
+                "instance": "an_instance",
+            }
+        )
+
+    def test_all(self):
+        self.assert_message_from_info(
+            "a_node: a_service at an_instance started",
+            {
+                "service": "a_service",
+                "node": "a_node",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceStartSkipped(NameBuildTest):
+    code = codes.SERVICE_START_SKIPPED
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "not starting a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "a_node: not starting a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": None,
+            }
+        )
+
+    def test_instance(self):
+        self.assert_message_from_info(
+            "not starting a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": "an_instance",
+            }
+        )
+
+    def test_all(self):
+        self.assert_message_from_info(
+            "a_node: not starting a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceStopStarted(NameBuildTest):
+    code = codes.SERVICE_STOP_STARTED
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "Stopping a_service...",
+            {
+                "service": "a_service",
+                "instance": None,
+            }
+        )
+
+    def test_with_instance(self):
+        self.assert_message_from_info(
+            "Stopping a_service at an_instance...",
+            {
+                "service": "a_service",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceStopError(NameBuildTest):
+    code = codes.SERVICE_STOP_ERROR
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "Unable to stop a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "a_node: Unable to stop a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": None,
+            }
+        )
+
+    def test_instance(self):
+        self.assert_message_from_info(
+            "Unable to stop a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": "an_instance",
+            }
+        )
+
+    def test_all(self):
+        self.assert_message_from_info(
+            "a_node: Unable to stop a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceStopSuccess(NameBuildTest):
+    code = codes.SERVICE_STOP_SUCCESS
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "a_service stopped",
+            {
+                "service": "a_service",
+                "node": None,
+                "instance": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "a_node: a_service stopped",
+            {
+                "service": "a_service",
+                "node": "a_node",
+                "instance": None,
+            }
+        )
+
+    def test_instance(self):
+        self.assert_message_from_info(
+            "a_service at an_instance stopped",
+            {
+                "service": "a_service",
+                "node": None,
+                "instance": "an_instance",
+            }
+        )
+
+    def test_all(self):
+        self.assert_message_from_info(
+            "a_node: a_service at an_instance stopped",
+            {
+                "service": "a_service",
+                "node": "a_node",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceEnableStarted(NameBuildTest):
+    code = codes.SERVICE_ENABLE_STARTED
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "Enabling a_service...",
+            {
+                "service": "a_service",
+                "instance": None,
+            }
+        )
+
+    def test_with_instance(self):
+        self.assert_message_from_info(
+            "Enabling a_service at an_instance...",
+            {
+                "service": "a_service",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceEnableError(NameBuildTest):
+    code = codes.SERVICE_ENABLE_ERROR
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "Unable to enable a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "a_node: Unable to enable a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": None,
+            }
+        )
+
+    def test_instance(self):
+        self.assert_message_from_info(
+            "Unable to enable a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": "an_instance",
+            }
+        )
+
+    def test_all(self):
+        self.assert_message_from_info(
+            "a_node: Unable to enable a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceEnableSuccess(NameBuildTest):
+    code = codes.SERVICE_ENABLE_SUCCESS
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "a_service enabled",
+            {
+                "service": "a_service",
+                "node": None,
+                "instance": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "a_node: a_service enabled",
+            {
+                "service": "a_service",
+                "node": "a_node",
+                "instance": None,
+            }
+        )
+
+    def test_instance(self):
+        self.assert_message_from_info(
+            "a_service at an_instance enabled",
+            {
+                "service": "a_service",
+                "node": None,
+                "instance": "an_instance",
+            }
+        )
+
+    def test_all(self):
+        self.assert_message_from_info(
+            "a_node: a_service at an_instance enabled",
+            {
+                "service": "a_service",
+                "node": "a_node",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceEnableSkipped(NameBuildTest):
+    code = codes.SERVICE_ENABLE_SKIPPED
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "not enabling a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "a_node: not enabling a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": None,
+            }
+        )
+
+    def test_instance(self):
+        self.assert_message_from_info(
+            "not enabling a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": "an_instance",
+            }
+        )
+
+    def test_all(self):
+        self.assert_message_from_info(
+            "a_node: not enabling a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceDisableStarted(NameBuildTest):
+    code = codes.SERVICE_DISABLE_STARTED
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "Disabling a_service...",
+            {
+                "service": "a_service",
+                "instance": None,
+            }
+        )
+
+    def test_with_instance(self):
+        self.assert_message_from_info(
+            "Disabling a_service at an_instance...",
+            {
+                "service": "a_service",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceDisableError(NameBuildTest):
+    code = codes.SERVICE_DISABLE_ERROR
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "Unable to disable a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "a_node: Unable to disable a_service: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": None,
+            }
+        )
+
+    def test_instance(self):
+        self.assert_message_from_info(
+            "Unable to disable a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": None,
+                "instance": "an_instance",
+            }
+        )
+
+    def test_all(self):
+        self.assert_message_from_info(
+            "a_node: Unable to disable a_service at an_instance: a_reason",
+            {
+                "service": "a_service",
+                "reason": "a_reason",
+                "node": "a_node",
+                "instance": "an_instance",
+            }
+        )
+
+
+class ServiceDisableSuccess(NameBuildTest):
+    code = codes.SERVICE_DISABLE_SUCCESS
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "a_service disabled",
+            {
+                "service": "a_service",
+                "node": None,
+                "instance": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "a_node: a_service disabled",
+            {
+                "service": "a_service",
+                "node": "a_node",
+                "instance": None,
+            }
+        )
+
+    def test_instance(self):
+        self.assert_message_from_info(
+            "a_service at an_instance disabled",
+            {
+                "service": "a_service",
+                "node": None,
+                "instance": "an_instance",
+            }
+        )
+
+    def test_all(self):
+        self.assert_message_from_info(
+            "a_node: a_service at an_instance disabled",
+            {
+                "service": "a_service",
+                "node": "a_node",
+                "instance": "an_instance",
+            }
+        )
diff --git a/pcs/cli/resource/parse_args.py b/pcs/cli/resource/parse_args.py
index 19ee8f9..1bdcd5b 100644
--- a/pcs/cli/resource/parse_args.py
+++ b/pcs/cli/resource/parse_args.py
@@ -58,7 +58,7 @@ def parse_create(arg_list):
 
 def _parse_bundle_groups(arg_list):
     repeatable_keyword_list = ["port-map", "storage-map"]
-    keyword_list = ["container", "network"] + repeatable_keyword_list
+    keyword_list = ["meta", "container", "network"] + repeatable_keyword_list
     groups = group_by_keywords(
         arg_list,
         set(keyword_list),
@@ -84,7 +84,7 @@ def _parse_bundle_groups(arg_list):
 def parse_bundle_create_options(arg_list):
     groups = _parse_bundle_groups(arg_list)
     container_options = groups.get("container", [])
-    container_type = None
+    container_type = ""
     if container_options and "=" not in container_options[0]:
         container_type = container_options.pop(0)
     parts = {
@@ -99,9 +99,8 @@ def parse_bundle_create_options(arg_list):
             prepare_options(storage_map)
             for storage_map in groups.get("storage-map", [])
         ],
+        "meta": prepare_options(groups.get("meta", []))
     }
-    if not parts["container_type"]:
-        parts["container_type"] = "docker"
     return parts
 
 def _split_bundle_map_update_op_and_options(
@@ -144,6 +143,7 @@ def parse_bundle_update_options(arg_list):
         "port_map_remove": port_map["remove"],
         "storage_map_add": storage_map["add"],
         "storage_map_remove": storage_map["remove"],
+        "meta": prepare_options(groups.get("meta", []))
     }
     return parts
 
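With the parser changes above, `pcs resource bundle create` and
`pcs resource bundle update` accept a `meta` keyword, and the container type is
no longer silently defaulted to docker, so it must be given explicitly. A hedged
command sketch (the bundle id and container options such as image= are
illustrative, not taken from this commit):

    # container type is mandatory; meta attributes can be set at create time
    pcs resource bundle create my-bundle container docker image=my-image \
        meta target-role=Stopped is-managed=false
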
diff --git a/pcs/cli/resource/test/test_parse_args.py b/pcs/cli/resource/test/test_parse_args.py
index 5033ec7..791b60d 100644
--- a/pcs/cli/resource/test/test_parse_args.py
+++ b/pcs/cli/resource/test/test_parse_args.py
@@ -215,11 +215,12 @@ class ParseBundleCreateOptions(TestCase):
         self.assert_produce(
             [],
             {
-                "container_type": "docker",
+                "container_type": "",
                 "container": {},
                 "network": {},
                 "port_map": [],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -228,13 +229,14 @@ class ParseBundleCreateOptions(TestCase):
 
     def test_container_type(self):
         self.assert_produce(
-            ["container", "lxc"],
+            ["container", "docker"],
             {
-                "container_type": "lxc",
+                "container_type": "docker",
                 "container": {},
                 "network": {},
                 "port_map": [],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -242,23 +244,25 @@ class ParseBundleCreateOptions(TestCase):
         self.assert_produce(
             ["container", "a=b", "c=d"],
             {
-                "container_type": "docker",
+                "container_type": "",
                 "container": {"a": "b", "c": "d"},
                 "network": {},
                 "port_map": [],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
     def test_container_type_and_options(self):
         self.assert_produce(
-            ["container", "lxc", "a=b", "c=d"],
+            ["container", "docker", "a=b", "c=d"],
             {
-                "container_type": "lxc",
+                "container_type": "docker",
                 "container": {"a": "b", "c": "d"},
                 "network": {},
                 "port_map": [],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -275,11 +279,12 @@ class ParseBundleCreateOptions(TestCase):
         self.assert_produce(
             ["network", "a=b", "c=d"],
             {
-                "container_type": "docker",
+                "container_type": "",
                 "container": {},
                 "network": {"a": "b", "c": "d"},
                 "port_map": [],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -304,11 +309,12 @@ class ParseBundleCreateOptions(TestCase):
         self.assert_produce(
             ["port-map", "a=b", "c=d"],
             {
-                "container_type": "docker",
+                "container_type": "",
                 "container": {},
                 "network": {},
                 "port_map": [{"a": "b", "c": "d"}],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -316,11 +322,12 @@ class ParseBundleCreateOptions(TestCase):
         self.assert_produce(
             ["port-map", "a=b", "c=d", "port-map", "e=f"],
             {
-                "container_type": "docker",
+                "container_type": "",
                 "container": {},
                 "network": {},
                 "port_map": [{"a": "b", "c": "d"}, {"e": "f"}],
                 "storage_map": [],
+                "meta": {},
             }
         )
 
@@ -342,11 +349,12 @@ class ParseBundleCreateOptions(TestCase):
         self.assert_produce(
             ["storage-map", "a=b", "c=d"],
             {
-                "container_type": "docker",
+                "container_type": "",
                 "container": {},
                 "network": {},
                 "port_map": [],
                 "storage_map": [{"a": "b", "c": "d"}],
+                "meta": {},
             }
         )
 
@@ -354,11 +362,12 @@ class ParseBundleCreateOptions(TestCase):
         self.assert_produce(
             ["storage-map", "a=b", "c=d", "storage-map", "e=f"],
             {
-                "container_type": "docker",
+                "container_type": "",
                 "container": {},
                 "network": {},
                 "port_map": [],
                 "storage_map": [{"a": "b", "c": "d"}, {"e": "f"}],
+                "meta": {},
             }
         )
 
@@ -368,22 +377,46 @@ class ParseBundleCreateOptions(TestCase):
     def test_storage_map_missing_key(self):
         self.assert_raises_cmdline(["storage-map", "=b", "c=d"])
 
+    def test_meta(self):
+        self.assert_produce(
+            ["meta", "a=b", "c=d"],
+            {
+                "container_type": "",
+                "container": {},
+                "network": {},
+                "port_map": [],
+                "storage_map": [],
+                "meta": {"a": "b", "c": "d"},
+            }
+        )
+
+    def test_meta_empty(self):
+        self.assert_raises_cmdline(["meta"])
+
+    def test_meta_missing_value(self):
+        self.assert_raises_cmdline(["meta", "a", "c=d"])
+
+    def test_meta_missing_key(self):
+        self.assert_raises_cmdline(["meta", "=b", "c=d"])
+
     def test_all(self):
         self.assert_produce(
             [
-                "container", "lxc", "a=b", "c=d",
+                "container", "docker", "a=b", "c=d",
                 "network", "e=f", "g=h",
                 "port-map", "i=j", "k=l",
                 "port-map", "m=n", "o=p",
                 "storage-map", "q=r", "s=t",
                 "storage-map", "u=v", "w=x",
+                "meta", "y=z", "A=B",
             ],
             {
-                "container_type": "lxc",
+                "container_type": "docker",
                 "container": {"a": "b", "c": "d"},
                 "network": {"e": "f", "g": "h"},
                 "port_map": [{"i": "j", "k": "l"}, {"m": "n", "o": "p"}],
                 "storage_map": [{"q": "r", "s": "t"}, {"u": "v", "w": "x"}],
+                "meta": {"y": "z", "A": "B"},
             }
         )
 
@@ -391,20 +424,23 @@ class ParseBundleCreateOptions(TestCase):
         self.assert_produce(
             [
                 "storage-map", "q=r", "s=t",
+                "meta", "y=z",
                 "port-map", "i=j", "k=l",
                 "network", "e=f",
-                "container", "lxc", "a=b",
+                "container", "docker", "a=b",
                 "storage-map", "u=v", "w=x",
                 "port-map", "m=n", "o=p",
+                "meta", "A=B",
                 "network", "g=h",
                 "container", "c=d",
             ],
             {
-                "container_type": "lxc",
+                "container_type": "docker",
                 "container": {"a": "b", "c": "d"},
                 "network": {"e": "f", "g": "h"},
                 "port_map": [{"i": "j", "k": "l"}, {"m": "n", "o": "p"}],
                 "storage_map": [{"q": "r", "s": "t"}, {"u": "v", "w": "x"}],
+                "meta": {"y": "z", "A": "B"},
             }
         )
 
@@ -432,6 +468,7 @@ class ParseBundleUpdateOptions(TestCase):
                 "port_map_remove": [],
                 "storage_map_add": [],
                 "storage_map_remove": [],
+                "meta": {},
             }
         )
 
@@ -445,6 +482,7 @@ class ParseBundleUpdateOptions(TestCase):
                 "port_map_remove": [],
                 "storage_map_add": [],
                 "storage_map_remove": [],
+                "meta": {},
             }
         )
 
@@ -467,6 +505,7 @@ class ParseBundleUpdateOptions(TestCase):
                 "port_map_remove": [],
                 "storage_map_add": [],
                 "storage_map_remove": [],
+                "meta": {},
             }
         )
 
@@ -519,6 +558,7 @@ class ParseBundleUpdateOptions(TestCase):
                 "port_map_remove": ["c", "d", "i"],
                 "storage_map_add": [],
                 "storage_map_remove": [],
+                "meta": {},
             }
         )
 
@@ -562,9 +602,34 @@ class ParseBundleUpdateOptions(TestCase):
                     {"e": "f", "g": "h",},
                 ],
                 "storage_map_remove": ["c", "d", "i"],
+                "meta": {},
             }
         )
 
+    def test_meta(self):
+        self.assert_produce(
+            ["meta", "a=b", "c=d"],
+            {
+                "container": {},
+                "network": {},
+                "port_map_add": [],
+                "port_map_remove": [],
+                "storage_map_add": [],
+                "storage_map_remove": [],
+                "meta": {"a": "b", "c": "d"},
+            }
+        )
+
+    def test_meta_empty(self):
+        self.assert_raises_cmdline(["meta"])
+
+    def test_meta_missing_value(self):
+        self.assert_raises_cmdline(["meta", "a", "c=d"])
+
+    def test_meta_missing_key(self):
+        self.assert_raises_cmdline(["meta", "=b", "c=d"])
+
+
     def test_all(self):
         self.assert_produce(
             [
@@ -578,6 +643,7 @@ class ParseBundleUpdateOptions(TestCase):
                 "storage-map", "add", "v=w",
                 "storage-map", "remove", "x", "y",
                 "storage-map", "remove", "z",
+                "meta", "A=B", "C=D",
             ],
             {
                 "container": {"a": "b", "c": "d"},
@@ -592,6 +658,7 @@ class ParseBundleUpdateOptions(TestCase):
                     {"v": "w"},
                 ],
                 "storage_map_remove": ["x", "y", "z"],
+                "meta": {"A": "B", "C": "D"},
             }
         )
 
@@ -599,11 +666,13 @@ class ParseBundleUpdateOptions(TestCase):
         self.assert_produce(
             [
                 "storage-map", "remove", "x", "y",
+                "meta", "A=B",
                 "port-map", "remove", "o", "p",
                 "network", "e=f", "g=h",
                 "storage-map", "add", "r=s", "t=u",
                 "port-map", "add", "i=j", "k=l",
                 "container", "a=b", "c=d",
+                "meta", "C=D",
                 "port-map", "remove", "q",
                 "storage-map", "remove", "z",
                 "storage-map", "add", "v=w",
@@ -622,6 +691,7 @@ class ParseBundleUpdateOptions(TestCase):
                     {"v": "w"},
                 ],
                 "storage_map_remove": ["x", "y", "z"],
+                "meta": {"A": "B", "C": "D"},
             }
         )
 
diff --git a/pcs/cluster.py b/pcs/cluster.py
index d64194d..cbf5726 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -70,7 +70,11 @@ from pcs.lib.node import NodeAddresses, NodeAddressesList
 from pcs.lib.nodes_task import check_corosync_offline_on_nodes, distribute_files
 from pcs.lib import node_communication_format
 import pcs.lib.pacemaker.live as lib_pacemaker
-from pcs.lib.tools import environment_file_to_dict, generate_key
+from pcs.lib.tools import (
+    environment_file_to_dict,
+    generate_binary_key,
+    generate_key,
+)
 
 def cluster_cmd(argv):
     if len(argv) == 0:
@@ -298,6 +302,18 @@ def cluster_certkey(argv):
 
 
 def cluster_setup(argv):
+    modifiers = utils.get_modificators()
+    allowed_encryption_values = ["0", "1"]
+    if modifiers["encryption"] not in allowed_encryption_values:
+        process_library_reports([
+            lib_reports.invalid_option_value(
+                "--encryption",
+                modifiers["encryption"],
+                allowed_encryption_values,
+                severity=ReportItemSeverity.ERROR,
+                forceable=None
+            )
+        ])
     if len(argv) < 2:
         usage.cluster(["setup"])
         sys.exit(1)
@@ -380,7 +396,8 @@ def cluster_setup(argv):
             node_list,
             options["transport_options"],
             options["totem_options"],
-            options["quorum_options"]
+            options["quorum_options"],
+            modifiers["encryption"] == "1"
         )
     process_library_reports(messages)
 
@@ -425,9 +442,9 @@ def cluster_setup(argv):
     else:
         # verify and ensure no cluster is set up on the nodes
         # checks that nodes are authenticated as well
+        lib_env = utils.get_lib_env()
         if "--force" not in utils.pcs_options:
             all_nodes_available = True
-            lib_env = utils.get_lib_env()
             for node in primary_addr_list:
                 available, message = utils.canAddNodeToCluster(
                     lib_env.node_communicator(),
@@ -452,11 +469,12 @@ def cluster_setup(argv):
             file_definitions.update(
                 node_communication_format.pcmk_authkey_file(generate_key())
             )
-            file_definitions.update(
-                node_communication_format.corosync_authkey_file(
-                    generate_key(random_bytes_count=128)
+            if modifiers["encryption"] == "1":
+                file_definitions.update(
+                    node_communication_format.corosync_authkey_file(
+                        generate_binary_key(random_bytes_count=128)
+                    )
                 )
-            )
 
             distribute_files(
                 lib_env.node_communicator(),
@@ -465,6 +483,7 @@ def cluster_setup(argv):
                 NodeAddressesList(
                     [NodeAddresses(node) for node in primary_addr_list]
                 ),
+                skip_offline_nodes=modifiers["skip_offline_nodes"],
                 allow_incomplete_distribution="--force" in utils.pcs_options
             )
         except LibraryError as e: #Theoretically, this should not happen
@@ -515,7 +534,9 @@ def cluster_setup(argv):
 
         # sync certificates as the last step because it restarts pcsd
         print()
-        pcsd.pcsd_sync_certs([], exit_after_error=False)
+        pcsd.pcsd_sync_certs(
+            [], exit_after_error=False, async_restart=modifiers["async"]
+        )
         if wait:
             print()
             wait_for_nodes_started(primary_addr_list, wait_timeout)
@@ -733,7 +754,8 @@ def cluster_setup_parse_options_cman(options, force=False):
     return parsed, messages
 
 def cluster_setup_create_corosync_conf(
-    cluster_name, node_list, transport_options, totem_options, quorum_options
+    cluster_name, node_list, transport_options, totem_options, quorum_options,
+    encrypted
 ):
     messages = []
 
@@ -749,6 +771,8 @@ def cluster_setup_create_corosync_conf(
 
     totem_section.add_attribute("version", "2")
     totem_section.add_attribute("cluster_name", cluster_name)
+    if not encrypted:
+        totem_section.add_attribute("secauth", "off")
 
     transport_options_names = (
         "transport",
@@ -1757,10 +1781,14 @@ def node_add(lib_env, node0, node1, modifiers):
                 NodeAddressesList([node_addr]),
             )
 
+        # do not send pcmk authkey to guest and remote nodes, they either have
+        # it or are not working anyway
+        # if the cluster is stopped, we cannot get the cib anyway
         _share_authkey(
             lib_env,
-            get_nodes(lib_env.get_corosync_conf(), lib_env.get_cib()),
+            get_nodes(lib_env.get_corosync_conf()),
             node_addr,
+            skip_offline_nodes=modifiers["skip_offline_nodes"],
             allow_incomplete_distribution=modifiers["skip_offline_nodes"]
         )
 
@@ -2112,15 +2140,30 @@ def cluster_reload(argv):
 # Code taken from cluster-clean script in pacemaker
 def cluster_destroy(argv):
     if "--all" in utils.pcs_options:
+        # destroy remote and guest nodes
+        cib = None
         lib_env = utils.get_lib_env()
-        all_remote_nodes = get_nodes(tree=lib_env.get_cib())
-        if len(all_remote_nodes) > 0:
-            _destroy_pcmk_remote_env(
-                lib_env,
-                all_remote_nodes,
-                allow_fails=True
+        try:
+            cib = lib_env.get_cib()
+        except LibraryError as e:
+            warn(
+                "Unable to load CIB to get guest and remote nodes from it, "
+                "those nodes will not be deconfigured."
             )
+        if cib is not None:
+            try:
+                all_remote_nodes = get_nodes(tree=cib)
+                if len(all_remote_nodes) > 0:
+                    _destroy_pcmk_remote_env(
+                        lib_env,
+                        all_remote_nodes,
+                        skip_offline_nodes=True,
+                        allow_fails=True
+                    )
+            except LibraryError as e:
+                utils.process_library_reports(e.args)
 
+        # destroy full-stack nodes
         destroy_cluster(utils.getNodesFromCorosyncConf())
     else:
         print("Shutting down pacemaker/corosync services...")
diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
index 2e96f6b..ebc3cc7 100644
--- a/pcs/common/report_codes.py
+++ b/pcs/common/report_codes.py
@@ -100,6 +100,7 @@ COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR"
 COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR"
 COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE"
 CRM_MON_ERROR = "CRM_MON_ERROR"
+DEPRECATED_OPTION = "DEPRECATED_OPTION"
 DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST"
 EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST"
 EMPTY_ID = "EMPTY_ID"
diff --git a/pcs/config.py b/pcs/config.py
index 94191e1..5526eb5 100644
--- a/pcs/config.py
+++ b/pcs/config.py
@@ -446,12 +446,12 @@ def config_backup_path_list(with_uid_gid=False, force_rhel6=None):
         "uname": settings.pacemaker_uname,
         "gname": settings.pacemaker_gname,
     }
-    pcmk_authkey_attrs = dict(cib_attrs)
-    pcmk_authkey_attrs["mode"] = 0o440
     if with_uid_gid:
         cib_attrs["uid"] = _get_uid(cib_attrs["uname"])
         cib_attrs["gid"] = _get_gid(cib_attrs["gname"])
 
+    pcmk_authkey_attrs = dict(cib_attrs)
+    pcmk_authkey_attrs["mode"] = 0o440
     file_list = {
         "cib.xml": {
             "path": os.path.join(settings.cib_dir, "cib.xml"),
diff --git a/pcs/lib/booth/env.py b/pcs/lib/booth/env.py
index e80b8c9..97e215b 100644
--- a/pcs/lib/booth/env.py
+++ b/pcs/lib/booth/env.py
@@ -99,8 +99,8 @@ class BoothEnv(object):
     def command_expect_live_env(self):
         if not self.__config.is_live:
             raise LibraryError(common_reports.live_environment_required([
-                "--booth-conf",
-                "--booth-key",
+                "BOOTH_CONF",
+                "BOOTH_KEY",
             ]))
 
     def set_key_path(self, path):
diff --git a/pcs/lib/cib/nvpair.py b/pcs/lib/cib/nvpair.py
index 261d17c..d3f5a5c 100644
--- a/pcs/lib/cib/nvpair.py
+++ b/pcs/lib/cib/nvpair.py
@@ -11,18 +11,19 @@ from functools import partial
 from pcs.lib.cib.tools import create_subelement_id
 from pcs.lib.xml_tools import get_sub_element
 
-def _append_new_nvpair(nvset_element, name, value):
+def _append_new_nvpair(nvset_element, name, value, id_provider=None):
     """
     Create nvpair with name and value as subelement of nvset_element.
 
     etree.Element nvset_element is context of new nvpair
     string name is name attribute of new nvpair
     string value is value attribute of new nvpair
+    IdProvider id_provider -- elements' ids generator
     """
     etree.SubElement(
         nvset_element,
         "nvpair",
-        id=create_subelement_id(nvset_element, name),
+        id=create_subelement_id(nvset_element, name, id_provider),
         name=name,
         value=value
     )
@@ -73,7 +74,7 @@ def arrange_first_nvset(tag_name, context_element, nvpair_dict):
 
     update_nvset(nvset_element, nvpair_dict)
 
-def append_new_nvset(tag_name, context_element, nvpair_dict):
+def append_new_nvset(tag_name, context_element, nvpair_dict, id_provider=None):
     """
     Append new nvset_element comprising nvpairs children (corresponding
     nvpair_dict) to the context_element
@@ -81,12 +82,13 @@ def append_new_nvset(tag_name, context_element, nvpair_dict):
     string tag_name should be "instance_attributes" or "meta_attributes"
     etree.Element context_element is element where new nvset will be appended
     dict nvpair_dict contains source for nvpair children
+    IdProvider id_provider -- elements' ids generator
     """
     nvset_element = etree.SubElement(context_element, tag_name, {
-        "id": create_subelement_id(context_element, tag_name)
+        "id": create_subelement_id(context_element, tag_name, id_provider)
     })
     for name, value in sorted(nvpair_dict.items()):
-        _append_new_nvpair(nvset_element, name, value)
+        _append_new_nvpair(nvset_element, name, value, id_provider)
 
 append_new_instance_attributes = partial(
     append_new_nvset,
diff --git a/pcs/lib/cib/resource/bundle.py b/pcs/lib/cib/resource/bundle.py
index 0fe16f3..8a49c28 100644
--- a/pcs/lib/cib/resource/bundle.py
+++ b/pcs/lib/cib/resource/bundle.py
@@ -9,6 +9,10 @@ from lxml import etree
 
 from pcs.common import report_codes
 from pcs.lib import reports, validate
+from pcs.lib.cib.nvpair import (
+    append_new_meta_attributes,
+    arrange_first_meta_attributes,
+)
 from pcs.lib.cib.resource.primitive import TAG as TAG_PRIMITIVE
 from pcs.lib.cib.tools import find_element_by_tag_and_id
 from pcs.lib.errors import (
@@ -96,7 +100,7 @@ def validate_new(
 
 def append_new(
     parent_element, id_provider, bundle_id, container_type, container_options,
-    network_options, port_map, storage_map
+    network_options, port_map, storage_map, meta_attributes
 ):
     """
     Create new bundle and add it to the CIB
@@ -109,6 +113,7 @@ def append_new(
     dict network_options -- network options
     list of dict port_map -- list of port mapping options
     list of dict storage_map -- list of storage mapping options
+    dict meta_attributes -- meta attributes
     """
     bundle_element = etree.SubElement(parent_element, TAG, {"id": bundle_id})
     # TODO create the proper element once more container_types are supported
@@ -132,6 +137,8 @@ def append_new(
         _append_storage_map(
             storage_element, id_provider, bundle_id, storage_map_options
         )
+    if meta_attributes:
+        append_new_meta_attributes(bundle_element, meta_attributes, id_provider)
     return bundle_element
 
 def validate_update(
@@ -203,7 +210,8 @@ def validate_update(
 
 def update(
     id_provider, bundle_el, container_options, network_options,
-    port_map_add, port_map_remove, storage_map_add, storage_map_remove
+    port_map_add, port_map_remove, storage_map_add, storage_map_remove,
+    meta_attributes
 ):
     """
     Modify an existing bundle (does not touch encapsulated resources)
@@ -216,6 +224,7 @@ def update(
     list of string port_map_remove -- list of port mapping ids to remove
     list of dict storage_map_add -- list of storage mapping options to add
     list of string storage_map_remove -- list of storage mapping ids to remove
+    dict meta_attributes -- meta attributes to update
     """
     bundle_id = bundle_el.get("id")
     update_attributes_remove_empty(
@@ -253,7 +262,11 @@ def update(
             storage_element, id_provider, bundle_id, storage_map_options
         )
 
+    if meta_attributes:
+        arrange_first_meta_attributes(bundle_el, meta_attributes)
+
     # remove empty elements with no attributes
+    # meta attributes are handled in their own function
     for element in (network_element, storage_element):
         if len(element) < 1 and not element.attrib:
             element.getparent().remove(element)
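
With the changes above a bundle can carry meta attributes both at creation time (append_new) and on update (arrange_first_meta_attributes). A hedged sketch of a caller passing the new meta_attributes argument; the values are illustrative and the internal modules are assumed to be importable as shown:

    from lxml import etree

    from pcs.lib.cib.resource import bundle
    from pcs.lib.cib.tools import IdProvider

    resources = etree.fromstring("<resources/>")
    bundle.append_new(
        resources,
        IdProvider(resources),
        "B1",
        "docker",
        container_options={"image": "pcs:test"},
        network_options={},
        port_map=[],
        storage_map=[],
        meta_attributes={"target-role": "Stopped"},
    )
    print(etree.tostring(resources, pretty_print=True).decode())
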
diff --git a/pcs/lib/cib/resource/common.py b/pcs/lib/cib/resource/common.py
index f9028ff..0e52b4c 100644
--- a/pcs/lib/cib/resource/common.py
+++ b/pcs/lib/cib/resource/common.py
@@ -58,16 +58,18 @@ def find_resources_to_enable(resource_el):
     etree resource_el -- resource element
     """
     if is_bundle(resource_el):
-        # bundles currently cannot be disabled - pcmk does not support that
-        # inner resources are supposed to be managed separately
-        return []
+        to_enable = [resource_el]
+        in_bundle = get_bundle_inner_resource(resource_el)
+        if in_bundle is not None:
+            to_enable.append(in_bundle)
+        return to_enable
 
     if is_any_clone(resource_el):
         return [resource_el, get_clone_inner_resource(resource_el)]
 
     to_enable = [resource_el]
     parent = resource_el.getparent()
-    if is_any_clone(parent):
+    if is_any_clone(parent) or is_bundle(parent):
         to_enable.append(parent)
     return to_enable
 
@@ -109,20 +111,25 @@ def find_resources_to_manage(resource_el):
     # put there manually. If we didn't do it, the resource may stay unmanaged,
     # as a managed primitive in an unmanaged clone / group is still unmanaged
     # and vice versa.
-    # Bundle resources cannot be set as unmanaged - pcmk currently doesn't
-    # support that. Resources in a bundle are supposed to be treated separately.
-    if is_bundle(resource_el):
-        return []
     res_id = resource_el.attrib["id"]
     return (
         [resource_el] # the resource itself
         +
         # its parents
         find_parent(resource_el, "resources").xpath(
+            # a master or a clone which contains a group, a primitive, or a
+            # grouped primitive with the specified id
+            # OR
+            # a group (in a clone, master, etc. - hence //) which contains a
+            # primitive with the specified id
+            # OR
+            # a bundle which contains a primitive with the specified id
             """
                 (./master|./clone)[(group|group/primitive|primitive)[@id='{r}']]
                 |
                 //group[primitive[@id='{r}']]
+                |
+                ./bundle[primitive[@id='{r}']]
             """
             .format(r=res_id)
         )
@@ -164,10 +171,19 @@ def find_resources_to_unmanage(resource_el):
     #   See clone notes above
     #
     # a bundled primitive - the primitive - the primitive
-    # a bundled primitive - the bundle - nothing
-    #  bundles currently cannot be set as unmanaged - pcmk does not support that
-    # an empty bundle - the bundle - nothing
-    #  bundles currently cannot be set as unmanaged - pcmk does not support that
+    # a bundled primitive - the bundle - the bundle and the primitive
+    #  We need to unmanage implicit resources created by pacemaker and there is
+    #  no other way to do it than to unmanage the bundle itself.
+    #  Since it is not possible to unbundle a resource, the concerns described
+    #  for unclone don't apply here. However, to prevent future bugs, in case
+    #  unbundling becomes possible, we unmanage the primitive as well.
+    # an empty bundle - the bundle - the bundle
+    #  There is nothing else to unmanage.
+    if is_bundle(resource_el):
+        in_bundle = get_bundle_inner_resource(resource_el)
+        return (
+            [resource_el, in_bundle] if in_bundle is not None else [resource_el]
+        )
     if is_any_clone(resource_el):
         resource_el = get_clone_inner_resource(resource_el)
     if is_group(resource_el):
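
The XPath above now also selects a bundle wrapping the given primitive, so unmanaging a bundled primitive unmanages its bundle too. A self-contained lxml illustration of just that expression:

    from lxml import etree

    resources = etree.fromstring(
        '<resources><bundle id="H-bundle"><primitive id="H"/></bundle></resources>'
    )
    matches = resources.xpath(
        """
            (./master|./clone)[(group|group/primitive|primitive)[@id='{r}']]
            |
            //group[primitive[@id='{r}']]
            |
            ./bundle[primitive[@id='{r}']]
        """
        .format(r="H")
    )
    print([el.get("id") for el in matches])  # ['H-bundle']
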
diff --git a/pcs/lib/cib/resource/operations.py b/pcs/lib/cib/resource/operations.py
index 9d8b2ef..a34db45 100644
--- a/pcs/lib/cib/resource/operations.py
+++ b/pcs/lib/cib/resource/operations.py
@@ -13,7 +13,11 @@ from pcs.common import report_codes
 from pcs.lib import reports, validate
 from pcs.lib.resource_agent import get_default_interval, complete_all_intervals
 from pcs.lib.cib.nvpair import append_new_instance_attributes
-from pcs.lib.cib.tools import create_subelement_id
+from pcs.lib.cib.tools import (
+    create_subelement_id,
+    does_id_exist,
+)
+from pcs.lib.errors import LibraryError
 from pcs.lib.pacemaker.values import (
     is_true,
     timeout_to_seconds,
@@ -149,6 +153,7 @@ def validate_operation_list(
             code_to_allow_extra_values=report_codes.FORCE_OPTIONS,
             allow_extra_values=allow_invalid,
         ),
+        validate.value_id("id", option_name_for_report="operation id"),
     ]
     report_list = []
     for operation in operation_list:
@@ -308,13 +313,17 @@ def append_new_operation(operations_element, options):
         (key, value) for key, value in options.items()
         if key not in OPERATION_NVPAIR_ATTRIBUTES
     )
-    attribute_map.update({
-        "id": create_id(
-            operations_element.getparent(),
-            options["name"],
-            options["interval"]
-        )
-    })
+    if "id" in attribute_map:
+        if does_id_exist(operations_element, attribute_map["id"]):
+            raise LibraryError(reports.id_already_exists(attribute_map["id"]))
+    else:
+        attribute_map.update({
+            "id": create_id(
+                operations_element.getparent(),
+                options["name"],
+                options["interval"]
+            )
+        })
     op_element = etree.SubElement(
         operations_element,
         "op",
diff --git a/pcs/lib/cib/resource/primitive.py b/pcs/lib/cib/resource/primitive.py
index 664aad4..0560182 100644
--- a/pcs/lib/cib/resource/primitive.py
+++ b/pcs/lib/cib/resource/primitive.py
@@ -32,6 +32,7 @@ def create(
     allow_invalid_operation=False,
     allow_invalid_instance_attributes=False,
     use_default_operations=True,
+    resource_type="resource"
 ):
     """
     Prepare all parts of primitive resource and append it into cib.
@@ -48,6 +49,7 @@ def create(
         instance_attributes
     bool use_default_operations is flag for completion operations with default
         actions specified in resource agent
+    string resource_type -- describes the resource for reports
     """
     if raw_operation_list is None:
         raw_operation_list = []
@@ -58,7 +60,7 @@ def create(
 
     if does_id_exist(resources_section, resource_id):
         raise LibraryError(reports.id_already_exists(resource_id))
-    validate_id(resource_id, "resource name")
+    validate_id(resource_id, "{0} name".format(resource_type))
 
     operation_list = prepare_operations(
         report_processor,
@@ -73,7 +75,7 @@ def create(
     report_processor.process_list(
         resource_agent.validate_parameters(
             instance_attributes,
-            parameters_type="resource",
+            parameters_type=resource_type,
             allow_invalid=allow_invalid_instance_attributes,
         )
     )
diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py
index 9b9d9b9..0f6d8f8 100644
--- a/pcs/lib/cib/test/test_nvpair.py
+++ b/pcs/lib/cib/test/test_nvpair.py
@@ -8,6 +8,7 @@ from __future__ import (
 from lxml import etree
 
 from pcs.lib.cib import nvpair
+from pcs.lib.cib.tools import IdProvider
 from pcs.test.tools.assertions import assert_xml_equal
 from pcs.test.tools.pcs_unittest import TestCase, mock
 from pcs.test.tools.xml import etree_to_str
@@ -25,6 +26,21 @@ class AppendNewNvpair(TestCase):
             """
         )
 
+    def test_with_id_provider(self):
+        nvset_element = etree.fromstring('<nvset id="a"/>')
+        provider = IdProvider(nvset_element)
+        provider.book_ids("a-b")
+        nvpair._append_new_nvpair(nvset_element, "b", "c", provider)
+        assert_xml_equal(
+            etree_to_str(nvset_element),
+            """
+            <nvset id="a">
+                <nvpair id="a-b-1" name="b" value="c"></nvpair>
+            </nvset>
+            """
+        )
+
+
 class UpdateNvsetTest(TestCase):
     @mock.patch(
         "pcs.lib.cib.nvpair.create_subelement_id",
@@ -167,6 +183,32 @@ class AppendNewNvsetTest(TestCase):
             etree_to_str(context_element)
         )
 
+    def test_with_id_provider(self):
+        context_element = etree.fromstring('<context id="a"/>')
+        provider = IdProvider(context_element)
+        provider.book_ids("a-instance_attributes", "a-instance_attributes-1-a")
+        nvpair.append_new_nvset(
+            "instance_attributes",
+            context_element,
+            {
+                "a": "b",
+                "c": "d",
+            },
+            provider
+        )
+        assert_xml_equal(
+            """
+                <context id="a">
+                    <instance_attributes id="a-instance_attributes-1">
+                        <nvpair id="a-instance_attributes-1-a-1" name="a" value="b"/>
+                        <nvpair id="a-instance_attributes-1-c" name="c" value="d"/>
+                    </instance_attributes>
+                </context>
+            """,
+            etree_to_str(context_element)
+        )
+
+
 class ArrangeFirstNvsetTest(TestCase):
     def setUp(self):
         self.root = etree.Element("root", id="root")
diff --git a/pcs/lib/cib/test/test_resource_common.py b/pcs/lib/cib/test/test_resource_common.py
index 52c2329..6b485f7 100644
--- a/pcs/lib/cib/test/test_resource_common.py
+++ b/pcs/lib/cib/test/test_resource_common.py
@@ -180,7 +180,7 @@ class FindResourcesToEnable(TestCase):
         self.assert_find_resources("F2", ["F2"])
 
     def test_primitive_in_bundle(self):
-        self.assert_find_resources("H", ["H"])
+        self.assert_find_resources("H", ["H", "H-bundle"])
 
     def test_group(self):
         self.assert_find_resources("D", ["D"])
@@ -204,10 +204,10 @@ class FindResourcesToEnable(TestCase):
         self.assert_find_resources("F-master", ["F-master", "F"])
 
     def test_bundle_empty(self):
-        self.assert_find_resources("G-bundle", [])
+        self.assert_find_resources("G-bundle", ["G-bundle"])
 
     def test_bundle_with_primitive(self):
-        self.assert_find_resources("H-bundle", [])
+        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
 
 
 class Enable(TestCase):
@@ -360,7 +360,7 @@ class FindResourcesToManage(TestCase):
         self.assert_find_resources("F2", ["F2", "F-master", "F"])
 
     def test_primitive_in_bundle(self):
-        self.assert_find_resources("H", ["H"])
+        self.assert_find_resources("H", ["H", "H-bundle"])
 
     def test_group(self):
         self.assert_find_resources("D", ["D", "D1", "D2"])
@@ -384,10 +384,10 @@ class FindResourcesToManage(TestCase):
         self.assert_find_resources("F-master", ["F-master", "F", "F1", "F2"])
 
     def test_bundle_empty(self):
-        self.assert_find_resources("G-bundle", [])
+        self.assert_find_resources("G-bundle", ["G-bundle"])
 
     def test_bundle_with_primitive(self):
-        self.assert_find_resources("H-bundle", [])
+        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
 
 
 class FindResourcesToUnmanage(TestCase):
@@ -447,10 +447,10 @@ class FindResourcesToUnmanage(TestCase):
         self.assert_find_resources("F-master", ["F1", "F2"])
 
     def test_bundle_empty(self):
-        self.assert_find_resources("G-bundle", [])
+        self.assert_find_resources("G-bundle", ["G-bundle"])
 
     def test_bundle_with_primitive(self):
-        self.assert_find_resources("H-bundle", [])
+        self.assert_find_resources("H-bundle", ["H-bundle", "H"])
 
 
 class Manage(TestCase):
diff --git a/pcs/lib/cib/test/test_resource_operations.py b/pcs/lib/cib/test/test_resource_operations.py
index de2b507..42fa49d 100644
--- a/pcs/lib/cib/test/test_resource_operations.py
+++ b/pcs/lib/cib/test/test_resource_operations.py
@@ -318,6 +318,28 @@ class ValidateOperation(TestCase):
             ],
         )
 
+    def test_return_error_on_invalid_id(self):
+        self.assert_operation_produces_report(
+            {
+                "name": "monitor",
+                "id": "a#b",
+            },
+            [
+                (
+                    severities.ERROR,
+                    report_codes.INVALID_ID,
+                    {
+                        "id": "a#b",
+                        "id_description": "operation id",
+                        "invalid_character": "#",
+                        "is_first_char": False,
+                    },
+                    None
+                ),
+            ],
+        )
+
+
 class GetRemainingDefaults(TestCase):
     @mock.patch("pcs.lib.cib.resource.operations.make_unique_intervals")
     def test_returns_remining_operations(self, make_unique_intervals):
diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
index 2308a42..cf91125 100644
--- a/pcs/lib/cib/tools.py
+++ b/pcs/lib/cib/tools.py
@@ -177,11 +177,11 @@ def find_element_by_tag_and_id(
         )
     )
 
-def create_subelement_id(context_element, suffix):
-    return find_unique_id(
-        context_element,
-        "{0}-{1}".format(context_element.get("id"), suffix)
-    )
+def create_subelement_id(context_element, suffix, id_provider=None):
+    proposed_id = "{0}-{1}".format(context_element.get("id"), suffix)
+    if id_provider:
+        return id_provider.allocate_id(proposed_id)
+    return find_unique_id(context_element, proposed_id)
 
 def check_new_id_applicable(tree, description, id):
     validate_id(id, description)
diff --git a/pcs/lib/commands/cluster.py b/pcs/lib/commands/cluster.py
index 7386e3c..a166ad5 100644
--- a/pcs/lib/commands/cluster.py
+++ b/pcs/lib/commands/cluster.py
@@ -21,18 +21,22 @@ from pcs.lib.errors import LibraryError
 from pcs.lib.pacemaker import state
 from pcs.lib.pacemaker.live import remove_node
 
-def _ensure_can_add_node_to_remote_cluster(env, node_addresses):
+def _ensure_can_add_node_to_remote_cluster(
+    env, node_addresses, warn_on_communication_exception=False
+):
     report_items = []
     nodes_task.check_can_add_node_to_cluster(
         env.node_communicator(),
         node_addresses,
         report_items,
-        check_response=nodes_task.availability_checker_remote_node
+        check_response=nodes_task.availability_checker_remote_node,
+        warn_on_communication_exception=warn_on_communication_exception,
     )
     env.report_processor.process_list(report_items)
 
 def _share_authkey(
     env, current_nodes, candidate_node_addresses,
+    skip_offline_nodes=False,
     allow_incomplete_distribution=False
 ):
     if env.pacemaker.has_authkey:
@@ -47,11 +51,14 @@ def _share_authkey(
         env.report_processor,
         node_communication_format.pcmk_authkey_file(authkey_content),
         node_addresses_list,
+        skip_offline_nodes,
         allow_incomplete_distribution,
         description="remote node configuration files"
     )
 
-def _start_and_enable_pacemaker_remote(env, node_list, allow_fails=False):
+def _start_and_enable_pacemaker_remote(
+    env, node_list, skip_offline_nodes=False, allow_fails=False
+):
     nodes_task.run_actions_on_multiple_nodes(
         env.node_communicator(),
         env.report_processor,
@@ -61,12 +68,14 @@ def _start_and_enable_pacemaker_remote(env, node_list, allow_fails=False):
         ]),
         lambda key, response: response.code == "success",
         node_list,
+        skip_offline_nodes,
         allow_fails,
         description="start of service pacemaker_remote"
     )
 
 def _prepare_pacemaker_remote_environment(
-    env, current_nodes, node_host, allow_incomplete_distribution, allow_fails
+    env, current_nodes, node_host, skip_offline_nodes,
+    allow_incomplete_distribution, allow_fails
 ):
     if not env.is_corosync_conf_live:
         env.report_processor.process_list([
@@ -88,14 +97,24 @@ def _prepare_pacemaker_remote_environment(
         return
 
     candidate_node = NodeAddresses(node_host)
-    _ensure_can_add_node_to_remote_cluster(env, candidate_node)
+    _ensure_can_add_node_to_remote_cluster(
+        env,
+        candidate_node,
+        skip_offline_nodes
+    )
     _share_authkey(
         env,
         current_nodes,
         candidate_node,
+        skip_offline_nodes,
         allow_incomplete_distribution
     )
-    _start_and_enable_pacemaker_remote(env, [candidate_node], allow_fails)
+    _start_and_enable_pacemaker_remote(
+        env,
+        [candidate_node],
+        skip_offline_nodes,
+        allow_fails
+    )
 
 def _ensure_resource_running(env, resource_id):
     env.report_processor.process(
@@ -117,6 +136,7 @@ def _ensure_consistently_live_env(env):
 
 def node_add_remote(
     env, host, node_name, operations, meta_attributes, instance_attributes,
+    skip_offline_nodes=False,
     allow_incomplete_distribution=False,
     allow_pacemaker_remote_service_fail=False,
     allow_invalid_operation=False,
@@ -132,6 +152,7 @@ def node_add_remote(
     dict meta_attributes contains attributes for primitive/meta_attributes
     dict instance_attributes contains attributes for
         primitive/instance_attributes
+    bool skip_offline_nodes -- a flag for ignoring when some nodes are offline
     bool allow_incomplete_distribution -- is a flag for allowing successfully
         finish this command even if is file distribution not succeeded
     bool allow_pacemaker_remote_service_fail -- is a flag for allowing
@@ -189,7 +210,7 @@ def node_add_remote(
         for report in report_list + list(e.args):
             if report.code != report_codes.ID_ALREADY_EXISTS:
                 unified_report_list.append(report)
-            elif report.info.get["id"] not in already_exists:
+            elif report.info["id"] not in already_exists:
                 unified_report_list.append(report)
                 already_exists.append(report.info["id"])
         report_list = unified_report_list
@@ -200,6 +221,7 @@ def node_add_remote(
         env,
         current_nodes,
         host,
+        skip_offline_nodes,
         allow_incomplete_distribution,
         allow_pacemaker_remote_service_fail,
     )
@@ -209,6 +231,7 @@ def node_add_remote(
 
 def node_add_guest(
     env, node_name, resource_id, options,
+    skip_offline_nodes=False,
     allow_incomplete_distribution=False,
     allow_pacemaker_remote_service_fail=False, wait=False,
 ):
@@ -220,6 +243,7 @@ def node_add_guest(
     string resource_id -- specifies resource that should be guest node
     dict options could contain keys remote-node, remote-port, remote-addr,
         remote-connect-timeout
+    bool skip_offline_nodes -- a flag for ignoring when some nodes are offline
     bool allow_incomplete_distribution -- is a flag for allowing successfully
         finish this command even if is file distribution not succeeded
     bool allow_pacemaker_remote_service_fail -- is a flag for allowing
@@ -263,6 +287,7 @@ def node_add_guest(
         env,
         current_nodes,
         guest_node.get_host_from_options(node_name, options),
+        skip_offline_nodes,
         allow_incomplete_distribution,
         allow_pacemaker_remote_service_fail,
     )
@@ -296,20 +321,18 @@ def _find_resources_to_remove(
 
     return resource_element_list
 
-def _remove_pcmk_remote_from_cib(
-    nodes, resource_element_list, get_host, remove_resource
-):
+def _get_node_addresses_from_resources(nodes, resource_element_list, get_host):
     node_addresses_set = set()
     for resource_element in resource_element_list:
         for node in nodes:
             #remote nodes uses ring0 only
             if get_host(resource_element) == node.ring0:
                 node_addresses_set.add(node)
-        remove_resource(resource_element)
-
     return sorted(node_addresses_set, key=lambda node: node.ring0)
 
-def _destroy_pcmk_remote_env(env, node_addresses_list, allow_fails):
+def _destroy_pcmk_remote_env(
+    env, node_addresses_list, skip_offline_nodes, allow_fails
+):
     actions = node_communication_format.create_pcmk_remote_actions([
         "stop",
         "disable",
@@ -324,6 +347,7 @@ def _destroy_pcmk_remote_env(env, node_addresses_list, allow_fails):
         actions,
         lambda key, response: response.code == "success",
         node_addresses_list,
+        skip_offline_nodes,
         allow_fails,
         description="stop of service pacemaker_remote"
     )
@@ -333,6 +357,7 @@ def _destroy_pcmk_remote_env(env, node_addresses_list, allow_fails):
         env.report_processor,
         files,
         node_addresses_list,
+        skip_offline_nodes,
         allow_fails,
         description="remote node files"
     )
@@ -356,6 +381,7 @@ def _report_skip_live_parts_in_remove(node_addresses_list):
 
 def node_remove_remote(
     env, node_identifier, remove_resource,
+    skip_offline_nodes=False,
     allow_remove_multiple_nodes=False,
     allow_pacemaker_remote_service_fail=False
 ):
@@ -365,6 +391,7 @@ def node_remove_remote(
     LibraryEnvironment env provides all for communication with externals
     string node_identifier -- node name or hostname
     callable remove_resource -- function for remove resource
+    bool skip_offline_nodes -- a flag for ignoring when some nodes are offline
     bool allow_remove_multiple_nodes -- is a flag for allowing
         remove unexpected multiple occurence of remote node for node_identifier
     bool allow_pacemaker_remote_service_fail -- is a flag for allowing
@@ -382,31 +409,36 @@ def node_remove_remote(
         allow_remove_multiple_nodes,
         remote_node.find_node_resources,
     )
-    node_addresses_list = _remove_pcmk_remote_from_cib(
+
+    node_addresses_list = _get_node_addresses_from_resources(
         get_nodes_remote(cib),
         resource_element_list,
         remote_node.get_host,
-        lambda resource_element: remove_resource(
-            resource_element.attrib["id"],
-            is_remove_remote_context=True,
-        )
     )
+
     if not env.is_corosync_conf_live:
         env.report_processor.process_list(
             _report_skip_live_parts_in_remove(node_addresses_list)
         )
-        return
+    else:
+        _destroy_pcmk_remote_env(
+            env,
+            node_addresses_list,
+            skip_offline_nodes,
+            allow_pacemaker_remote_service_fail
+        )
 
     #remove node from pcmk caches is currently integrated in remove_resource
     #function
-    _destroy_pcmk_remote_env(
-        env,
-        node_addresses_list,
-        allow_pacemaker_remote_service_fail
-    )
+    for resource_element in resource_element_list:
+        remove_resource(
+            resource_element.attrib["id"],
+            is_remove_remote_context=True,
+        )
 
 def node_remove_guest(
     env, node_identifier,
+    skip_offline_nodes=False,
     allow_remove_multiple_nodes=False,
     allow_pacemaker_remote_service_fail=False,
     wait=False,
@@ -416,6 +448,7 @@ def node_remove_guest(
 
     LibraryEnvironment env provides all for communication with externals
     string node_identifier -- node name, hostname or resource id
+    bool skip_offline_nodes -- a flag for ignoring when some nodes are offline
     bool allow_remove_multiple_nodes -- is a flag for allowing
         remove unexpected multiple occurence of remote node for node_identifier
     bool allow_pacemaker_remote_service_fail -- is a flag for allowing
@@ -435,29 +468,34 @@ def node_remove_guest(
         guest_node.find_node_resources,
     )
 
-    node_addresses_list =  _remove_pcmk_remote_from_cib(
+    node_addresses_list = _get_node_addresses_from_resources(
         get_nodes_guest(cib),
         resource_element_list,
         guest_node.get_host,
-        guest_node.unset_guest,
     )
-    env.push_cib(cib, wait)
 
     if not env.is_corosync_conf_live:
         env.report_processor.process_list(
             _report_skip_live_parts_in_remove(node_addresses_list)
         )
-        return
+    else:
+        _destroy_pcmk_remote_env(
+            env,
+            node_addresses_list,
+            skip_offline_nodes,
+            allow_pacemaker_remote_service_fail
+        )
+
+    for resource_element in resource_element_list:
+        guest_node.unset_guest(resource_element)
+
+    env.push_cib(cib, wait)
 
     #remove node from pcmk caches
-    for node_addresses in node_addresses_list:
-        remove_node(env.cmd_runner(), node_addresses.name)
+    if env.is_cib_live:
+        for node_addresses in node_addresses_list:
+            remove_node(env.cmd_runner(), node_addresses.name)
 
-    _destroy_pcmk_remote_env(
-        env,
-        node_addresses_list,
-        allow_pacemaker_remote_service_fail
-    )
 
 def node_clear(env, node_name, allow_clear_cluster_node=False):
     """
diff --git a/pcs/lib/commands/resource.py b/pcs/lib/commands/resource.py
index a9f8271..0c5f682 100644
--- a/pcs/lib/commands/resource.py
+++ b/pcs/lib/commands/resource.py
@@ -22,6 +22,7 @@ from pcs.lib.errors import LibraryError
 from pcs.lib.pacemaker.values import validate_id
 from pcs.lib.pacemaker.state import (
     ensure_resource_state,
+    info_resource_state,
     is_resource_managed,
     ResourceNotFound,
 )
@@ -31,7 +32,10 @@ from pcs.lib.resource_agent import(
 
 @contextmanager
 def resource_environment(
-    env, wait=False, wait_for_resource_ids=None, disabled_after_wait=False,
+    env,
+    wait=False,
+    wait_for_resource_ids=None,
+    resource_state_reporter=info_resource_state,
     required_cib_version=None
 ):
     env.ensure_wait_satisfiable(wait)
@@ -41,13 +45,26 @@ def resource_environment(
     if wait is not False and wait_for_resource_ids:
         state = env.get_cluster_state()
         env.report_processor.process_list([
-            ensure_resource_state(not disabled_after_wait, state, res_id)
+            resource_state_reporter(state, res_id)
             for res_id in wait_for_resource_ids
         ])
 
+def _ensure_disabled_after_wait(disabled_after_wait):
+    def inner(state, resource_id):
+        return ensure_resource_state(
+            not disabled_after_wait,
+            state,
+            resource_id
+        )
+    return inner
+
 def _validate_remote_connection(
-    nodes, resource_id, instance_attributes,  allow_not_suitable_command
+    resource_agent, nodes_to_validate_against, resource_id, instance_attributes,
+    allow_not_suitable_command
 ):
+    if resource_agent.get_name() != remote_node.AGENT_NAME.full_name:
+        return []
+
     report_list = []
     report_list.append(
         reports.get_problem_creator(
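
resource_environment above now accepts a reporter callable instead of a bare disabled_after_wait flag, and _ensure_disabled_after_wait builds that callable as a closure over the expected state. A standalone sketch of the pattern; the names and the state lookup are illustrative, not the pcs API:

    def make_state_reporter(expect_disabled):
        def reporter(state, resource_id):
            running = resource_id in state  # stand-in for a real state query
            if expect_disabled and running:
                return "error: {0} is running".format(resource_id)
            if not expect_disabled and not running:
                return "error: {0} is not running".format(resource_id)
            return "info: {0} is in the expected state".format(resource_id)
        return reporter

    reporter = make_state_reporter(expect_disabled=True)
    print(reporter({"A"}, "A"))   # error: A is running
    print(reporter(set(), "A"))   # info: A is in the expected state
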
@@ -58,7 +75,7 @@ def _validate_remote_connection(
 
     report_list.extend(
         remote_node.validate_host_not_conflicts(
-            nodes,
+            nodes_to_validate_against,
             resource_id,
             instance_attributes
         )
@@ -66,8 +83,8 @@ def _validate_remote_connection(
     return report_list
 
 def _validate_guest_change(
-    tree, nodes, meta_attributes, allow_not_suitable_command,
-    detect_remove=False
+    tree, nodes_to_validate_against, meta_attributes,
+    allow_not_suitable_command, detect_remove=False
 ):
     if not guest_node.is_node_name_in_options(meta_attributes):
         return []
@@ -89,7 +106,7 @@ def _validate_guest_change(
     report_list.extend(
         guest_node.validate_conflicts(
             tree,
-            nodes,
+            nodes_to_validate_against,
             node_name,
             meta_attributes
         )
@@ -97,28 +114,54 @@ def _validate_guest_change(
 
     return report_list
 
-def _validate_special_cases(
-    nodes, resource_agent, resources_section, resource_id, meta_attributes,
+def _get_nodes_to_validate_against(env, tree):
+    if not env.is_corosync_conf_live and env.is_cib_live:
+        raise LibraryError(
+            reports.live_environment_required(["COROSYNC_CONF"])
+        )
+
+    if not env.is_cib_live and env.is_corosync_conf_live:
+        #we do not try to get corosync.conf from live cluster when cib is not
+        #taken from live cluster
+        return get_nodes(tree=tree)
+
+    return get_nodes(env.get_corosync_conf(), tree)
+
+
+def _check_special_cases(
+    env, resource_agent, resources_section, resource_id, meta_attributes,
     instance_attributes, allow_not_suitable_command
 ):
-    report_list = []
-
-    if resource_agent.get_name() == remote_node.AGENT_NAME.full_name:
-        report_list.extend(_validate_remote_connection(
-            nodes,
-            resource_id,
-            instance_attributes,
-            allow_not_suitable_command,
-        ))
+    if(
+        resource_agent.get_name() != remote_node.AGENT_NAME.full_name
+        and
+        not guest_node.is_node_name_in_options(meta_attributes)
+    ):
+        #if no special case applies we do not need corosync.conf, which is
+        #required only for getting the nodes to validate against
+        return
+
+    nodes_to_validate_against = _get_nodes_to_validate_against(
+        env,
+        resources_section
+    )
 
+    report_list = []
+    report_list.extend(_validate_remote_connection(
+        resource_agent,
+        nodes_to_validate_against,
+        resource_id,
+        instance_attributes,
+        allow_not_suitable_command,
+    ))
     report_list.extend(_validate_guest_change(
         resources_section,
-        nodes,
+        nodes_to_validate_against,
         meta_attributes,
         allow_not_suitable_command,
     ))
 
-    return report_list
+    env.report_processor.process_list(report_list)
 
 def create(
     env, resource_id, resource_agent_name,
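
_check_special_cases above fetches the node list only when a remote or guest node is actually involved, and _get_nodes_to_validate_against copes with mixed live and file-based environments. A compact restatement of those rules, assuming an env object exposing is_cib_live and is_corosync_conf_live as in the hunk:

    def node_source(env):
        if env.is_cib_live and not env.is_corosync_conf_live:
            raise RuntimeError("a live COROSYNC_CONF is required with a live CIB")
        if env.is_corosync_conf_live and not env.is_cib_live:
            return "nodes from the CIB file only"
        return "nodes from corosync.conf plus the CIB"
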
@@ -165,17 +208,21 @@ def create(
         env,
         wait,
         [resource_id],
-        ensure_disabled or resource.common.are_meta_disabled(meta_attributes),
+        _ensure_disabled_after_wait(
+            ensure_disabled
+            or
+            resource.common.are_meta_disabled(meta_attributes)
+        )
     ) as resources_section:
-        env.report_processor.process_list(_validate_special_cases(
-            get_nodes(env.get_corosync_conf(), resources_section),
+        _check_special_cases(
+            env,
             resource_agent,
             resources_section,
             resource_id,
             meta_attributes,
             instance_attributes,
             allow_not_suitable_command
-        ))
+        )
 
         primitive_element = resource.primitive.create(
             env.report_processor, resources_section,
@@ -239,7 +286,7 @@ def _create_as_clone_common(
         env,
         wait,
         [resource_id],
-        (
+        _ensure_disabled_after_wait(
             ensure_disabled
             or
             resource.common.are_meta_disabled(meta_attributes)
@@ -247,15 +294,15 @@ def _create_as_clone_common(
             resource.common.is_clone_deactivated_by_meta(clone_meta_options)
         )
     ) as resources_section:
-        env.report_processor.process_list(_validate_special_cases(
-            get_nodes(env.get_corosync_conf(), resources_section),
+        _check_special_cases(
+            env,
             resource_agent,
             resources_section,
             resource_id,
             meta_attributes,
             instance_attributes,
             allow_not_suitable_command
-        ))
+        )
 
         primitive_element = resource.primitive.create(
             env.report_processor, resources_section,
@@ -323,17 +370,21 @@ def create_in_group(
         env,
         wait,
         [resource_id],
-        ensure_disabled or resource.common.are_meta_disabled(meta_attributes),
+        _ensure_disabled_after_wait(
+            ensure_disabled
+            or
+            resource.common.are_meta_disabled(meta_attributes)
+        )
     ) as resources_section:
-        env.report_processor.process_list(_validate_special_cases(
-            get_nodes(env.get_corosync_conf(), resources_section),
+        _check_special_cases(
+            env,
             resource_agent,
             resources_section,
             resource_id,
             meta_attributes,
             instance_attributes,
             allow_not_suitable_command
-        ))
+        )
 
         primitive_element = resource.primitive.create(
             env.report_processor, resources_section,
@@ -403,18 +454,22 @@ def create_into_bundle(
         env,
         wait,
         [resource_id],
-        disabled_after_wait=ensure_disabled,
+        _ensure_disabled_after_wait(
+            ensure_disabled
+            or
+            resource.common.are_meta_disabled(meta_attributes)
+        ),
         required_cib_version=(2, 8, 0)
     ) as resources_section:
-        env.report_processor.process_list(_validate_special_cases(
-            get_nodes(env.get_corosync_conf(), resources_section),
+        _check_special_cases(
+            env,
             resource_agent,
             resources_section,
             resource_id,
             meta_attributes,
             instance_attributes,
             allow_not_suitable_command
-        ))
+        )
 
         primitive_element = resource.primitive.create(
             env.report_processor, resources_section,
@@ -435,8 +490,9 @@ def create_into_bundle(
 
 def bundle_create(
     env, bundle_id, container_type, container_options=None,
-    network_options=None, port_map=None, storage_map=None,
+    network_options=None, port_map=None, storage_map=None, meta_attributes=None,
     force_options=False,
+    ensure_disabled=False,
     wait=False,
 ):
     """
@@ -447,24 +503,32 @@ def bundle_create(
     string container_type -- container engine name (docker, lxc...)
     dict container_options -- container options
     dict network_options -- network options
-    list of dict port_map -- list of port mapping options
-    list of dict storage_map -- list of storage mapping options
+    list of dict port_map -- a list of port mapping options
+    list of dict storage_map -- a list of storage mapping options
+    dict meta_attributes -- bundle's meta attributes
     bool force_options -- return warnings instead of forceable errors
+    bool ensure_disabled -- set the bundle's target-role to "Stopped"
     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
     """
     container_options = container_options or {}
     network_options = network_options or {}
     port_map = port_map or []
     storage_map = storage_map or []
+    meta_attributes = meta_attributes or {}
 
     with resource_environment(
         env,
         wait,
         [bundle_id],
-        # bundles are always enabled, currently there is no way to disable them
-        disabled_after_wait=False,
+        _ensure_disabled_after_wait(
+            ensure_disabled
+            or
+            resource.common.are_meta_disabled(meta_attributes)
+        ),
         required_cib_version=(2, 8, 0)
     ) as resources_section:
+        # no need to run validations related to remote and guest nodes as those
+        # nodes can only be created from primitive resources
         id_provider = IdProvider(resources_section)
         env.report_processor.process_list(
             resource.bundle.validate_new(
@@ -475,10 +539,11 @@ def bundle_create(
                 network_options,
                 port_map,
                 storage_map,
+                # TODO meta attributes - there is no validation for now
                 force_options
             )
         )
-        resource.bundle.append_new(
+        bundle_element = resource.bundle.append_new(
             resources_section,
             id_provider,
             bundle_id,
@@ -486,13 +551,16 @@ def bundle_create(
             container_options,
             network_options,
             port_map,
-            storage_map
+            storage_map,
+            meta_attributes
         )
+        if ensure_disabled:
+            resource.common.disable(bundle_element)
 
 def bundle_update(
     env, bundle_id, container_options=None, network_options=None,
     port_map_add=None, port_map_remove=None, storage_map_add=None,
-    storage_map_remove=None,
+    storage_map_remove=None, meta_attributes=None,
     force_options=False,
     wait=False,
 ):
@@ -507,6 +575,7 @@ def bundle_update(
     list of string port_map_remove -- list of port mapping ids to remove
     list of dict storage_map_add -- list of storage mapping options to add
     list of string storage_map_remove -- list of storage mapping ids to remove
+    dict meta_attributes -- meta attributes to update
     bool force_options -- return warnings instead of forceable errors
     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
     """
@@ -516,15 +585,16 @@ def bundle_update(
     port_map_remove = port_map_remove or []
     storage_map_add = storage_map_add or []
     storage_map_remove = storage_map_remove or []
+    meta_attributes = meta_attributes or {}
 
     with resource_environment(
         env,
         wait,
         [bundle_id],
-        # bundles are always enabled, currently there is no way to disable them
-        disabled_after_wait=False,
         required_cib_version=(2, 8, 0)
     ) as resources_section:
+        # no need to run validations related to remote and guest nodes as those
+        # nodes can only be created from primitive resources
         id_provider = IdProvider(resources_section)
         bundle_element = find_element_by_tag_and_id(
             resource.bundle.TAG,
@@ -541,6 +611,7 @@ def bundle_update(
                 port_map_remove,
                 storage_map_add,
                 storage_map_remove,
+                # TODO meta attributes - there is no validation for now
                 force_options
             )
         )
@@ -552,7 +623,8 @@ def bundle_update(
             port_map_add,
             port_map_remove,
             storage_map_add,
-            storage_map_remove
+            storage_map_remove,
+            meta_attributes
         )
 
 def disable(env, resource_ids, wait):
@@ -563,7 +635,7 @@ def disable(env, resource_ids, wait):
     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
     """
     with resource_environment(
-        env, wait, resource_ids, True
+        env, wait, resource_ids, _ensure_disabled_after_wait(True)
     ) as resources_section:
         resource_el_list = _find_resources_or_raise(
             resources_section,
@@ -585,7 +657,7 @@ def enable(env, resource_ids, wait):
     mixed wait -- False: no wait, None: wait default timeout, int: wait timeout
     """
     with resource_environment(
-        env, wait, resource_ids, False
+        env, wait, resource_ids, _ensure_disabled_after_wait(False)
     ) as resources_section:
         resource_el_list = _find_resources_or_raise(
             resources_section,
@@ -612,7 +684,7 @@ def _resource_list_enable_disable(resource_el_list, func, cluster_state):
             report_list.append(
                 reports.id_not_found(
                     res_id,
-                    id_description="resource/clone/master/group"
+                    id_description="resource/clone/master/group/bundle"
                )
             )
     return report_list
@@ -705,7 +777,7 @@ def _find_resources_or_raise(
     resource_tags = (
         resource.clone.ALL_TAGS
         +
-        [resource.group.TAG, resource.primitive.TAG]
+        [resource.group.TAG, resource.primitive.TAG, resource.bundle.TAG]
     )
     for res_id in resource_ids:
         try:
@@ -715,7 +787,7 @@ def _find_resources_or_raise(
                         resource_tags,
                         resources_section,
                         res_id,
-                        id_description="resource/clone/master/group"
+                        id_description="resource/clone/master/group/bundle"
                     )
                 )
             )
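
Taken together, the command changes above let a bundle be created disabled and have its meta attributes managed afterwards. A hedged sketch mirroring the calls exercised by the test changes further below; env is assumed to be a LibraryEnvironment:

    from pcs.lib.commands import resource

    def create_stopped_bundle(env):
        resource.bundle_create(
            env, "B1", "docker",
            container_options={"image": "pcs:test"},
            meta_attributes={"is-managed": "false"},
            ensure_disabled=True,  # adds the target-role=Stopped meta attribute
        )

    def drop_bundle_meta(env):
        resource.bundle_update(
            env, "B1",
            meta_attributes={"is-managed": ""},  # an empty value removes the nvpair
        )
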
diff --git a/pcs/lib/commands/stonith.py b/pcs/lib/commands/stonith.py
index 22bb798..a3c3ad5 100644
--- a/pcs/lib/commands/stonith.py
+++ b/pcs/lib/commands/stonith.py
@@ -60,12 +60,17 @@ def create(
         ensure_disabled or are_meta_disabled(meta_attributes),
     ) as resources_section:
         stonith_element = resource.primitive.create(
-            env.report_processor, resources_section,
-            stonith_id, stonith_agent,
-            operations, meta_attributes, instance_attributes,
-            allow_invalid_operation,
-            allow_invalid_instance_attributes,
-            use_default_operations,
+            env.report_processor,
+            resources_section,
+            stonith_id,
+            stonith_agent,
+            raw_operation_list=operations,
+            meta_attributes=meta_attributes,
+            instance_attributes=instance_attributes,
+            allow_invalid_operation=allow_invalid_operation,
+            allow_invalid_instance_attributes=allow_invalid_instance_attributes,
+            use_default_operations=use_default_operations,
+            resource_type="stonith"
         )
         if ensure_disabled:
             resource.common.disable(stonith_element)
diff --git a/pcs/lib/commands/test/resource/fixture.py b/pcs/lib/commands/test/resource/fixture.py
index f1fe09b..8d96dc9 100644
--- a/pcs/lib/commands/test/resource/fixture.py
+++ b/pcs/lib/commands/test/resource/fixture.py
@@ -145,7 +145,7 @@ def report_not_found(res_id, context_type=""):
             "context_type": context_type,
             "context_id": "",
             "id": res_id,
-            "id_description": "resource/clone/master/group",
+            "id_description": "resource/clone/master/group/bundle",
         },
         None
     )
diff --git a/pcs/lib/commands/test/resource/test_bundle_create.py b/pcs/lib/commands/test/resource/test_bundle_create.py
index b9922d8..3bdeee9 100644
--- a/pcs/lib/commands/test/resource/test_bundle_create.py
+++ b/pcs/lib/commands/test/resource/test_bundle_create.py
@@ -40,7 +40,7 @@ class MinimalCreate(CommonTest):
             self.fixture_cib_pre,
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {"image": "pcs:test", }
+                container_options={"image": "pcs:test", }
             ),
             self.fixture_resources_bundle_simple
         )
@@ -90,7 +90,7 @@ class MinimalCreate(CommonTest):
 
         resource.bundle_create(
             self.env, "B1", "docker",
-            {"image": "pcs:test", }
+            container_options={"image": "pcs:test", }
         )
 
         self.env.report_processor.assert_reports([
@@ -122,7 +122,7 @@ class CreateDocker(CommonTest):
             self.fixture_cib_pre,
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {"image": "pcs:test", }
+                container_options={"image": "pcs:test", }
             ),
             self.fixture_resources_bundle_simple
         )
@@ -132,7 +132,7 @@ class CreateDocker(CommonTest):
             self.fixture_cib_pre,
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "image": "pcs:test",
                     "masters": "0",
                     "network": "extra network settings",
@@ -168,7 +168,7 @@ class CreateDocker(CommonTest):
         assert_raise_library_error(
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "replicas-per-host": "0",
                     "replicas": "0",
                     "masters": "-1",
@@ -226,7 +226,7 @@ class CreateDocker(CommonTest):
         assert_raise_library_error(
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "image": "",
                 },
                 force_options=True
@@ -253,7 +253,7 @@ class CreateDocker(CommonTest):
         assert_raise_library_error(
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "image": "pcs:test",
                     "extra": "option",
                 }
@@ -276,7 +276,7 @@ class CreateDocker(CommonTest):
             self.fixture_cib_pre,
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "image": "pcs:test",
                     "extra": "option",
                 },
@@ -932,13 +932,61 @@ class CreateWithStorageMap(CommonTest):
         )
 
 
+class CreateWithMeta(CommonTest):
+    def test_success(self):
+        self.assert_command_effect(
+            self.fixture_cib_pre,
+            lambda: resource.bundle_create(
+                self.env, "B1", "docker",
+                container_options={"image": "pcs:test", },
+                meta_attributes={
+                    "target-role": "Stopped",
+                    "is-managed": "false",
+                }
+            ),
+            """
+                <resources>
+                    <bundle id="B1">
+                        <docker image="pcs:test" />
+                        <meta_attributes id="B1-meta_attributes">
+                            <nvpair id="B1-meta_attributes-is-managed"
+                                name="is-managed" value="false" />
+                            <nvpair id="B1-meta_attributes-target-role"
+                                name="target-role" value="Stopped" />
+                        </meta_attributes>
+                    </bundle>
+                </resources>
+            """
+        )
+
+    def test_disabled(self):
+        self.assert_command_effect(
+            self.fixture_cib_pre,
+            lambda: resource.bundle_create(
+                self.env, "B1", "docker",
+                container_options={"image": "pcs:test", },
+                ensure_disabled=True
+            ),
+            """
+                <resources>
+                    <bundle id="B1">
+                        <meta_attributes id="B1-meta_attributes">
+                            <nvpair id="B1-meta_attributes-target-role"
+                                name="target-role" value="Stopped" />
+                        </meta_attributes>
+                        <docker image="pcs:test" />
+                    </bundle>
+                </resources>
+            """
+        )
+
 class CreateWithAllOptions(CommonTest):
     def test_success(self):
         self.assert_command_effect(
             self.fixture_cib_pre,
             lambda: resource.bundle_create(
                 self.env, "B1", "docker",
-                {
+                container_options={
                     "image": "pcs:test",
                     "masters": "0",
                     "network": "extra network settings",
@@ -947,13 +995,13 @@ class CreateWithAllOptions(CommonTest):
                     "replicas": "4",
                     "replicas-per-host": "2",
                 },
-                {
+                network_options={
                     "control-port": "12345",
                     "host-interface": "eth0",
                     "host-netmask": "24",
                     "ip-range-start": "192.168.100.200",
                 },
-                [
+                port_map=[
                     {
                         "port": "1001",
                     },
@@ -967,7 +1015,7 @@ class CreateWithAllOptions(CommonTest):
                         "range": "3000-3300",
                     },
                 ],
-                [
+                storage_map=[
                     {
                         "source-dir": "/tmp/docker1a",
                         "target-dir": "/tmp/docker1b",
@@ -1082,21 +1130,26 @@ class Wait(CommonTest):
         </resources>
     """
 
-    timeout = 10
+    fixture_resources_bundle_simple_disabled = """
+        <resources>
+            <bundle id="B1">
+                <meta_attributes id="B1-meta_attributes">
+                    <nvpair id="B1-meta_attributes-target-role"
+                        name="target-role" value="Stopped" />
+                </meta_attributes>
+                <docker image="pcs:test" />
+            </bundle>
+        </resources>
+    """
 
-    def fixture_calls_initial(self):
-        return (
-            fixture.call_wait_supported() +
-            fixture.calls_cib(
-                self.fixture_cib_pre,
-                self.fixture_resources_bundle_simple,
-                cib_base_file=self.cib_base_file,
-            )
-        )
+    timeout = 10
 
-    def simple_bundle_create(self, wait=False):
+    def simple_bundle_create(self, wait=False, disabled=False):
         return resource.bundle_create(
-            self.env, "B1", "docker", {"image": "pcs:test"}, wait=wait,
+            self.env, "B1", "docker",
+            container_options={"image": "pcs:test"},
+            ensure_disabled=disabled,
+            wait=wait,
         )
 
     def test_wait_fail(self):
@@ -1108,7 +1161,14 @@ class Wait(CommonTest):
             """
         )
         self.runner.set_runs(
-            self.fixture_calls_initial() +
+            fixture.call_wait_supported()
+            +
+            fixture.calls_cib(
+                self.fixture_cib_pre,
+                self.fixture_resources_bundle_simple,
+                cib_base_file=self.cib_base_file,
+            )
+            +
             fixture.call_wait(self.timeout, 62, fixture_wait_timeout_error)
         )
         assert_raise_library_error(
@@ -1122,8 +1182,16 @@ class Wait(CommonTest):
     @skip_unless_pacemaker_supports_bundle
     def test_wait_ok_run_ok(self):
         self.runner.set_runs(
-            self.fixture_calls_initial() +
-            fixture.call_wait(self.timeout) +
+            fixture.call_wait_supported()
+            +
+            fixture.calls_cib(
+                self.fixture_cib_pre,
+                self.fixture_resources_bundle_simple,
+                cib_base_file=self.cib_base_file,
+            )
+            +
+            fixture.call_wait(self.timeout)
+            +
             fixture.call_status(fixture.state_complete(
                 self.fixture_status_running
             ))
@@ -1139,8 +1207,16 @@ class Wait(CommonTest):
     @skip_unless_pacemaker_supports_bundle
     def test_wait_ok_run_fail(self):
         self.runner.set_runs(
-            self.fixture_calls_initial() +
-            fixture.call_wait(self.timeout) +
+            fixture.call_wait_supported()
+            +
+            fixture.calls_cib(
+                self.fixture_cib_pre,
+                self.fixture_resources_bundle_simple,
+                cib_base_file=self.cib_base_file,
+            )
+            +
+            fixture.call_wait(self.timeout)
+            +
             fixture.call_status(fixture.state_complete(
                 self.fixture_status_not_running
             ))
@@ -1150,3 +1226,48 @@ class Wait(CommonTest):
             fixture.report_resource_not_running("B1", severities.ERROR),
         )
         self.runner.assert_everything_launched()
+
+    @skip_unless_pacemaker_supports_bundle
+    def test_disabled_wait_ok_run_ok(self):
+        self.runner.set_runs(
+            fixture.call_wait_supported()
+            +
+            fixture.calls_cib(
+                self.fixture_cib_pre,
+                self.fixture_resources_bundle_simple_disabled,
+                cib_base_file=self.cib_base_file,
+            )
+            +
+            fixture.call_wait(self.timeout)
+            +
+            fixture.call_status(fixture.state_complete(
+                self.fixture_status_not_running
+            ))
+        )
+        self.simple_bundle_create(self.timeout, disabled=True)
+        self.runner.assert_everything_launched()
+
+    @skip_unless_pacemaker_supports_bundle
+    def test_disabled_wait_ok_run_fail(self):
+        self.runner.set_runs(
+            fixture.call_wait_supported()
+            +
+            fixture.calls_cib(
+                self.fixture_cib_pre,
+                self.fixture_resources_bundle_simple_disabled,
+                cib_base_file=self.cib_base_file,
+            )
+            +
+            fixture.call_wait(self.timeout)
+            +
+            fixture.call_status(fixture.state_complete(
+                self.fixture_status_running
+            ))
+        )
+        assert_raise_library_error(
+            lambda: self.simple_bundle_create(self.timeout, disabled=True),
+            fixture.report_resource_running(
+                "B1", {"Started": ["node1", "node2"]}, severities.ERROR
+            )
+        )
+        self.runner.assert_everything_launched()
diff --git a/pcs/lib/commands/test/resource/test_bundle_update.py b/pcs/lib/commands/test/resource/test_bundle_update.py
index 55cfa7b..7a1ee49 100644
--- a/pcs/lib/commands/test/resource/test_bundle_update.py
+++ b/pcs/lib/commands/test/resource/test_bundle_update.py
@@ -709,6 +709,96 @@ class StorageMap(CommonTest):
         self.runner.assert_everything_launched()
 
 
+class Meta(CommonTest):
+    fixture_no_meta = """
+        <resources>
+            <bundle id="B1">
+                <docker image="pcs:test" masters="3" replicas="6"/>
+            </bundle>
+        </resources>
+    """
+
+    fixture_meta_stopped = """
+        <resources>
+            <bundle id="B1">
+                <meta_attributes id="B1-meta_attributes">
+                <nvpair id="B1-meta_attributes-target-role"
+                    name="target-role" value="Stopped" />
+                </meta_attributes>
+                <docker image="pcs:test" masters="3" replicas="6"/>
+            </bundle>
+        </resources>
+    """
+
+    def test_add_meta_element(self):
+        self.assert_command_effect(
+            self.fixture_no_meta,
+            lambda: resource.bundle_update(
+                self.env, "B1",
+                meta_attributes={
+                    "target-role": "Stopped",
+                }
+            ),
+            self.fixture_meta_stopped
+        )
+
+    def test_remove_meta_element(self):
+        self.assert_command_effect(
+            self.fixture_meta_stopped,
+            lambda: resource.bundle_update(
+                self.env, "B1",
+                meta_attributes={
+                    "target-role": "",
+                }
+            ),
+            self.fixture_no_meta
+        )
+
+    def test_change_meta(self):
+        fixture_cib_pre = """
+            <resources>
+                <bundle id="B1">
+                    <meta_attributes id="B1-meta_attributes">
+                    <nvpair id="B1-meta_attributes-target-role"
+                        name="target-role" value="Stopped" />
+                    <nvpair id="B1-meta_attributes-priority"
+                        name="priority" value="15" />
+                    <nvpair id="B1-meta_attributes-is-managed"
+                        name="is-managed" value="false" />
+                    </meta_attributes>
+                    <docker image="pcs:test" masters="3" replicas="6"/>
+                </bundle>
+            </resources>
+        """
+        fixture_cib_post = """
+            <resources>
+                <bundle id="B1">
+                    <meta_attributes id="B1-meta_attributes">
+                    <nvpair id="B1-meta_attributes-target-role"
+                        name="target-role" value="Stopped" />
+                    <nvpair id="B1-meta_attributes-priority"
+                        name="priority" value="10" />
+                    <nvpair id="B1-meta_attributes-resource-stickiness"
+                        name="resource-stickiness" value="100" />
+                    </meta_attributes>
+                    <docker image="pcs:test" masters="3" replicas="6"/>
+                </bundle>
+            </resources>
+        """
+        self.assert_command_effect(
+            fixture_cib_pre,
+            lambda: resource.bundle_update(
+                self.env, "B1",
+                meta_attributes={
+                    "priority": "10",
+                    "resource-stickiness": "100",
+                    "is-managed": "",
+                }
+            ),
+            fixture_cib_post
+        )
+
+
 class Wait(CommonTest):
     fixture_status_running = """
         <resources>
@@ -794,7 +884,7 @@ class Wait(CommonTest):
         self.runner.assert_everything_launched()
 
     @skip_unless_pacemaker_supports_bundle
-    def test_wait_ok_run_ok(self):
+    def test_wait_ok_running(self):
         self.runner.set_runs(
             self.fixture_calls_initial() +
             fixture.call_wait(self.timeout) +
@@ -811,7 +901,7 @@ class Wait(CommonTest):
         self.runner.assert_everything_launched()
 
     @skip_unless_pacemaker_supports_bundle
-    def test_wait_ok_run_fail(self):
+    def test_wait_ok_not_running(self):
         self.runner.set_runs(
             self.fixture_calls_initial() +
             fixture.call_wait(self.timeout) +
@@ -819,8 +909,8 @@ class Wait(CommonTest):
                 self.fixture_status_not_running
             ))
         )
-        assert_raise_library_error(
-            lambda: self.simple_bundle_update(self.timeout),
-            fixture.report_resource_not_running("B1", severities.ERROR),
-        )
+        self.simple_bundle_update(self.timeout)
+        self.env.report_processor.assert_reports([
+            fixture.report_resource_not_running("B1", severities.INFO),
+        ])
         self.runner.assert_everything_launched()
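
The Meta tests above exercise the new meta attribute handling in bundle_update. A minimal sketch of the library call they drive, assuming env is an initialized LibraryEnvironment (all other names mirror the tests):

    from pcs.lib.commands import resource

    # Sketch only: set or change bundle meta attributes in one call;
    # an empty value removes the attribute, as in test_remove_meta_element.
    resource.bundle_update(
        env, "B1",
        meta_attributes={
            "target-role": "Stopped",  # set or change
            "is-managed": "",          # remove
        }
    )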
diff --git a/pcs/lib/commands/test/resource/test_resource_enable_disable.py b/pcs/lib/commands/test/resource/test_resource_enable_disable.py
index 91ac068..b03740b 100644
--- a/pcs/lib/commands/test/resource/test_resource_enable_disable.py
+++ b/pcs/lib/commands/test/resource/test_resource_enable_disable.py
@@ -469,6 +469,35 @@ fixture_bundle_cib_disabled_primitive = """
         </bundle>
     </resources>
 """
+fixture_bundle_cib_disabled_bundle = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-target-role"
+                    name="target-role" value="Stopped" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy" />
+        </bundle>
+    </resources>
+"""
+fixture_bundle_cib_disabled_both = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-target-role"
+                    name="target-role" value="Stopped" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+                <meta_attributes id="A-meta_attributes">
+                    <nvpair id="A-meta_attributes-target-role"
+                        name="target-role" value="Stopped" />
+                </meta_attributes>
+            </primitive>
+        </bundle>
+    </resources>
+"""
 fixture_bundle_status_managed = """
     <resources>
         <bundle id="A-bundle" type="docker" image="pcmktest:http"
@@ -486,7 +515,7 @@ fixture_bundle_status_managed = """
 fixture_bundle_status_unmanaged = """
     <resources>
         <bundle id="A-bundle" type="docker" image="pcmktest:http"
-            unique="false" managed="true" failed="false"
+            unique="false" managed="false" failed="false"
         >
             <replica id="0">
                 <resource id="A" managed="false" />
@@ -1460,17 +1489,12 @@ class DisableBundle(ResourceWithStateTest):
         )
 
     def test_bundle(self):
-        self.runner.set_runs(
-            fixture.call_cib_load(
-                fixture.cib_resources(fixture_bundle_cib_enabled)
-            )
-        )
-
-        assert_raise_library_error(
+        self.assert_command_effect(
+            fixture_bundle_cib_enabled,
+            fixture_bundle_status_managed,
             lambda: resource.disable(self.env, ["A-bundle"], False),
-            fixture.report_not_for_bundles("A-bundle")
+            fixture_bundle_cib_disabled_bundle
         )
-        self.runner.assert_everything_launched()
 
     def test_primitive_unmanaged(self):
         self.assert_command_effect(
@@ -1483,6 +1507,17 @@ class DisableBundle(ResourceWithStateTest):
             ]
         )
 
+    def test_bundle_unmanaged(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_enabled,
+            fixture_bundle_status_unmanaged,
+            lambda: resource.disable(self.env, ["A-bundle"], False),
+            fixture_bundle_cib_disabled_bundle,
+            reports=[
+                fixture_report_unmanaged("A-bundle"),
+            ]
+        )
+
 
 @skip_unless_pacemaker_supports_bundle
 class EnableBundle(ResourceWithStateTest):
@@ -1494,18 +1529,29 @@ class EnableBundle(ResourceWithStateTest):
             fixture_bundle_cib_enabled
         )
 
+    def test_primitive_disabled_both(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_disabled_both,
+            fixture_bundle_status_managed,
+            lambda: resource.enable(self.env, ["A"], False),
+            fixture_bundle_cib_enabled
+        )
+
     def test_bundle(self):
-        self.runner.set_runs(
-            fixture.call_cib_load(
-                fixture.cib_resources(fixture_bundle_cib_enabled)
-            )
+        self.assert_command_effect(
+            fixture_bundle_cib_disabled_bundle,
+            fixture_bundle_status_managed,
+            lambda: resource.enable(self.env, ["A-bundle"], False),
+            fixture_bundle_cib_enabled
         )
 
-        assert_raise_library_error(
+    def test_bundle_disabled_both(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_disabled_both,
+            fixture_bundle_status_managed,
             lambda: resource.enable(self.env, ["A-bundle"], False),
-            fixture.report_not_for_bundles("A-bundle")
+            fixture_bundle_cib_enabled
         )
-        self.runner.assert_everything_launched()
 
     def test_primitive_unmanaged(self):
         self.assert_command_effect(
@@ -1515,5 +1561,18 @@ class EnableBundle(ResourceWithStateTest):
             fixture_bundle_cib_enabled,
             reports=[
                 fixture_report_unmanaged("A"),
+                fixture_report_unmanaged("A-bundle"),
+            ]
+        )
+
+    def test_bundle_unmanaged(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_disabled_primitive,
+            fixture_bundle_status_unmanaged,
+            lambda: resource.enable(self.env, ["A-bundle"], False),
+            fixture_bundle_cib_enabled,
+            reports=[
+                fixture_report_unmanaged("A-bundle"),
+                fixture_report_unmanaged("A"),
             ]
         )
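
As the rewritten DisableBundle/EnableBundle tests show, disable and enable now accept bundle ids instead of rejecting them. A hedged sketch of the corresponding library calls, assuming env is a LibraryEnvironment and using the same wait flag the tests pass:

    from pcs.lib.commands import resource

    # Disabling a bundle sets target-role=Stopped on the bundle itself;
    # enabling it removes target-role from the bundle and, if present,
    # from the primitive inside it as well.
    resource.disable(env, ["A-bundle"], False)
    resource.enable(env, ["A-bundle"], False)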
diff --git a/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py b/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
index 6d8c787..95b44bc 100644
--- a/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
+++ b/pcs/lib/commands/test/resource/test_resource_manage_unmanage.py
@@ -517,6 +517,26 @@ fixture_clone_group_cib_unmanaged_all_primitives_op_disabled = """
     </resources>
 """
 
+
+fixture_bundle_empty_cib_managed = """
+    <resources>
+        <bundle id="A-bundle">
+            <docker image="pcs:test" />
+        </bundle>
+    </resources>
+"""
+fixture_bundle_empty_cib_unmanaged_bundle = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-is-managed"
+                    name="is-managed" value="false" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+        </bundle>
+    </resources>
+"""
+
 fixture_bundle_cib_managed = """
     <resources>
         <bundle id="A-bundle">
@@ -526,7 +546,19 @@ fixture_bundle_cib_managed = """
         </bundle>
     </resources>
 """
-
+fixture_bundle_cib_unmanaged_bundle = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-is-managed"
+                    name="is-managed" value="false" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+            </primitive>
+        </bundle>
+    </resources>
+"""
 fixture_bundle_cib_unmanaged_primitive = """
     <resources>
         <bundle id="A-bundle">
@@ -540,6 +572,78 @@ fixture_bundle_cib_unmanaged_primitive = """
         </bundle>
     </resources>
 """
+fixture_bundle_cib_unmanaged_both = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-is-managed"
+                    name="is-managed" value="false" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+                <meta_attributes id="A-meta_attributes">
+                    <nvpair id="A-meta_attributes-is-managed"
+                        name="is-managed" value="false" />
+                </meta_attributes>
+            </primitive>
+        </bundle>
+    </resources>
+"""
+
+fixture_bundle_cib_managed_op_enabled = """
+    <resources>
+        <bundle id="A-bundle">
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+                <operations>
+                    <op id="A-start" name="start" />
+                    <op id="A-stop" name="stop" />
+                    <op id="A-monitor" name="monitor"/>
+                </operations>
+            </primitive>
+        </bundle>
+    </resources>
+"""
+fixture_bundle_cib_unmanaged_primitive_op_disabled = """
+    <resources>
+        <bundle id="A-bundle">
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+                <meta_attributes id="A-meta_attributes">
+                    <nvpair id="A-meta_attributes-is-managed"
+                        name="is-managed" value="false" />
+                </meta_attributes>
+                <operations>
+                    <op id="A-start" name="start" />
+                    <op id="A-stop" name="stop" />
+                    <op id="A-monitor" name="monitor" enabled="false"/>
+                </operations>
+            </primitive>
+        </bundle>
+    </resources>
+"""
+fixture_bundle_cib_unmanaged_both_op_disabled = """
+    <resources>
+        <bundle id="A-bundle">
+            <meta_attributes id="A-bundle-meta_attributes">
+                <nvpair id="A-bundle-meta_attributes-is-managed"
+                    name="is-managed" value="false" />
+            </meta_attributes>
+            <docker image="pcs:test" />
+            <primitive id="A" class="ocf" provider="heartbeat" type="Dummy">
+                <meta_attributes id="A-meta_attributes">
+                    <nvpair id="A-meta_attributes-is-managed"
+                        name="is-managed" value="false" />
+                </meta_attributes>
+                <operations>
+                    <op id="A-start" name="start" />
+                    <op id="A-stop" name="stop" />
+                    <op id="A-monitor" name="monitor" enabled="false"/>
+                </operations>
+            </primitive>
+        </bundle>
+    </resources>
+"""
 
 def fixture_report_no_monitors(resource):
     return (
@@ -852,17 +956,18 @@ class UnmanageBundle(ResourceWithoutStateTest):
         )
 
     def test_bundle(self):
-        self.runner.set_runs(
-            fixture.call_cib_load(
-                fixture.cib_resources(fixture_bundle_cib_managed)
-            )
+        self.assert_command_effect(
+            fixture_bundle_cib_managed,
+            lambda: resource.unmanage(self.env, ["A-bundle"]),
+            fixture_bundle_cib_unmanaged_both
         )
 
-        assert_raise_library_error(
-            lambda: resource.unmanage(self.env, ["A-bundle"], False),
-            fixture.report_not_for_bundles("A-bundle")
+    def test_bundle_empty(self):
+        self.assert_command_effect(
+            fixture_bundle_empty_cib_managed,
+            lambda: resource.unmanage(self.env, ["A-bundle"]),
+            fixture_bundle_empty_cib_unmanaged_bundle
         )
-        self.runner.assert_everything_launched()
 
 
 class ManageBundle(ResourceWithoutStateTest):
@@ -873,18 +978,47 @@ class ManageBundle(ResourceWithoutStateTest):
             fixture_bundle_cib_managed,
         )
 
+    def test_primitive_unmanaged_bundle(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_unmanaged_bundle,
+            lambda: resource.manage(self.env, ["A"]),
+            fixture_bundle_cib_managed,
+        )
+
+    def test_primitive_unmanaged_both(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_unmanaged_both,
+            lambda: resource.manage(self.env, ["A"]),
+            fixture_bundle_cib_managed,
+        )
+
     def test_bundle(self):
-        self.runner.set_runs(
-            fixture.call_cib_load(
-                fixture.cib_resources(fixture_bundle_cib_unmanaged_primitive)
-            )
+        self.assert_command_effect(
+            fixture_bundle_cib_unmanaged_bundle,
+            lambda: resource.manage(self.env, ["A-bundle"]),
+            fixture_bundle_cib_managed,
         )
 
-        assert_raise_library_error(
-            lambda: resource.manage(self.env, ["A-bundle"], False),
-            fixture.report_not_for_bundles("A-bundle")
+    def test_bundle_unmanaged_primitive(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_unmanaged_primitive,
+            lambda: resource.manage(self.env, ["A-bundle"]),
+            fixture_bundle_cib_managed,
+        )
+
+    def test_bundle_unmanaged_both(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_unmanaged_both,
+            lambda: resource.manage(self.env, ["A-bundle"]),
+            fixture_bundle_cib_managed,
+        )
+
+    def test_bundle_empty(self):
+        self.assert_command_effect(
+            fixture_bundle_empty_cib_unmanaged_bundle,
+            lambda: resource.manage(self.env, ["A-bundle"]),
+            fixture_bundle_empty_cib_managed
         )
-        self.runner.assert_everything_launched()
 
 
 class MoreResources(ResourceWithoutStateTest):
@@ -1090,3 +1224,24 @@ class WithMonitor(ResourceWithoutStateTest):
             lambda: resource.unmanage(self.env, ["A1"], True),
             fixture_clone_group_cib_unmanaged_primitive_op_disabled
         )
+
+    def test_unmanage_bundle(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_managed_op_enabled,
+            lambda: resource.unmanage(self.env, ["A-bundle"], True),
+            fixture_bundle_cib_unmanaged_both_op_disabled
+        )
+
+    def test_unmanage_in_bundle(self):
+        self.assert_command_effect(
+            fixture_bundle_cib_managed_op_enabled,
+            lambda: resource.unmanage(self.env, ["A"], True),
+            fixture_bundle_cib_unmanaged_primitive_op_disabled
+        )
+
+    def test_unmanage_bundle_empty(self):
+        self.assert_command_effect(
+            fixture_bundle_empty_cib_managed,
+            lambda: resource.unmanage(self.env, ["A-bundle"], True),
+            fixture_bundle_empty_cib_unmanaged_bundle
+        )
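
Manage and unmanage now cover bundles too, including the variant that also disables monitor operations. A sketch under the same assumptions as above:

    from pcs.lib.commands import resource

    # Unmanaging a bundle sets is-managed=false on the bundle and on its
    # primitive; passing True additionally disables the primitive's
    # monitor operations, as in WithMonitor.test_unmanage_bundle.
    resource.unmanage(env, ["A-bundle"])
    resource.unmanage(env, ["A-bundle"], True)
    resource.manage(env, ["A-bundle"])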
diff --git a/pcs/lib/commands/test/test_stonith_agent.py b/pcs/lib/commands/test/test_stonith_agent.py
index 1bbbcb1..f3a4fe9 100644
--- a/pcs/lib/commands/test/test_stonith_agent.py
+++ b/pcs/lib/commands/test/test_stonith_agent.py
@@ -8,7 +8,11 @@ from __future__ import (
 import logging
 from lxml import etree
 
-from pcs.test.tools.assertions import assert_raise_library_error, start_tag_error_text
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_report_item_list_equal,
+    start_tag_error_text,
+)
 from pcs.test.tools.custom_mock import MockLibraryReportProcessor
 from pcs.test.tools.pcs_unittest import mock, TestCase
 
@@ -16,6 +20,7 @@ from pcs.common import report_codes
 from pcs.lib import resource_agent as lib_ra
 from pcs.lib.env import LibraryEnvironment
 from pcs.lib.errors import ReportItemSeverity as severity
+from pcs.lib.external import CommandRunner
 
 from pcs.lib.commands import stonith_agent as lib
 
@@ -212,3 +217,109 @@ class TestDescribeAgent(TestCase):
         )
 
         self.assertEqual(len(mock_metadata.mock_calls), 1)
+
+
+class ValidateParameters(TestCase):
+    def setUp(self):
+        self.agent = lib_ra.StonithAgent(
+            mock.MagicMock(spec_set=CommandRunner),
+            "fence_dummy"
+        )
+        self.metadata = etree.XML("""
+            <resource-agent>
+                <parameters>
+                    <parameter name="test_param" required="0">
+                        <longdesc>Long description</longdesc>
+                        <shortdesc>short description</shortdesc>
+                        <content type="string" default="default_value" />
+                    </parameter>
+                    <parameter name="required_param" required="1">
+                        <content type="boolean" />
+                    </parameter>
+                    <parameter name="action">
+                        <content type="string" default="reboot" />
+                        <shortdesc>Fencing action</shortdesc>
+                    </parameter>
+                </parameters>
+            </resource-agent>
+        """)
+        patcher = mock.patch.object(lib_ra.StonithAgent, "_get_metadata")
+        self.addCleanup(patcher.stop)
+        self.get_metadata = patcher.start()
+        self.get_metadata.return_value = self.metadata
+
+        patcher_stonithd = mock.patch.object(
+            lib_ra.StonithdMetadata, "_get_metadata"
+        )
+        self.addCleanup(patcher_stonithd.stop)
+        self.get_stonithd_metadata = patcher_stonithd.start()
+        self.get_stonithd_metadata.return_value = etree.XML("""
+            <resource-agent>
+                <parameters />
+            </resource-agent>
+        """)
+
+    def test_action_is_deprecated(self):
+        assert_report_item_list_equal(
+            self.agent.validate_parameters({
+                "action": "reboot",
+                "required_param": "value",
+            }),
+            [
+                (
+                    severity.ERROR,
+                    report_codes.DEPRECATED_OPTION,
+                    {
+                        "option_name": "action",
+                        "option_type": "stonith",
+                        "replaced_by": [
+                            "pcmk_off_action",
+                            "pcmk_reboot_action"
+                        ],
+                    },
+                    report_codes.FORCE_OPTIONS
+                ),
+            ],
+        )
+
+    def test_action_is_deprecated_forced(self):
+        assert_report_item_list_equal(
+            self.agent.validate_parameters({
+                "action": "reboot",
+                "required_param": "value",
+            }, allow_invalid=True),
+            [
+                (
+                    severity.WARNING,
+                    report_codes.DEPRECATED_OPTION,
+                    {
+                        "option_name": "action",
+                        "option_type": "stonith",
+                        "replaced_by": [
+                            "pcmk_off_action",
+                            "pcmk_reboot_action"
+                        ],
+                    },
+                    None
+                ),
+            ],
+        )
+
+    def test_action_not_reported_deprecated_when_empty(self):
+        assert_report_item_list_equal(
+            self.agent.validate_parameters({
+                "action": "",
+                "required_param": "value",
+            }),
+            [
+            ],
+        )
+
+    def test_required_not_specified_on_update(self):
+        assert_report_item_list_equal(
+            self.agent.validate_parameters({
+                "test_param": "value",
+            }, update=True),
+            [
+            ],
+        )
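
The ValidateParameters tests document the new StonithAgent.validate_parameters behaviour: a non-empty 'action' produces a forceable DEPRECATED_OPTION report, allow_invalid downgrades it to a warning, and update=True skips the check for missing required parameters. A minimal sketch, assuming a real CommandRunner instance is available as runner:

    from pcs.lib import resource_agent as lib_ra

    agent = lib_ra.StonithAgent(runner, "fence_dummy")
    # Returns a DEPRECATED_OPTION report for "action"; with
    # allow_invalid=True it is a warning instead of a forceable error.
    report_list = agent.validate_parameters(
        {"action": "reboot", "required_param": "value"},
        allow_invalid=True,
    )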
diff --git a/pcs/lib/env.py b/pcs/lib/env.py
index c41685b..97ec50c 100644
--- a/pcs/lib/env.py
+++ b/pcs/lib/env.py
@@ -255,11 +255,10 @@ class LibraryEnvironment(object):
         return exists_local_corosync_conf()
 
     def command_expect_live_corosync_env(self):
-        # TODO get rid of cli knowledge
         if not self.is_corosync_conf_live:
-            raise LibraryError(reports.live_environment_required([
-                "--corosync_conf"
-            ]))
+            raise LibraryError(
+                reports.live_environment_required(["COROSYNC_CONF"])
+            )
 
     @property
     def is_corosync_conf_live(self):
diff --git a/pcs/lib/nodes_task.py b/pcs/lib/nodes_task.py
index 703609b..ce26923 100644
--- a/pcs/lib/nodes_task.py
+++ b/pcs/lib/nodes_task.py
@@ -277,7 +277,8 @@ def availability_checker_remote_node(
 
 def check_can_add_node_to_cluster(
     node_communicator, node, report_items,
-    check_response=availability_checker_node
+    check_response=availability_checker_node,
+    warn_on_communication_exception=False,
 ):
     """
     Analyze result of node_available check if it is possible use the node as
@@ -294,13 +295,21 @@ def check_can_add_node_to_cluster(
         node_communicator,
         node,
         "remote/node_available",
-        safe_report_items
+        safe_report_items,
+        warn_on_communication_exception=warn_on_communication_exception
     )
     report_items.extend(safe_report_items)
 
     if ReportListAnalyzer(safe_report_items).error_list:
         return
 
+    #If there was a communication error and --skip-offline is in effect, no
+    #exception was raised. If there is no result, we cannot process it.
+    #Note: the error may be caused by older pcsd daemon not supporting commands
+    #sent by newer client.
+    if not availability_info:
+        return
+
     is_in_expected_format = (
         isinstance(availability_info, dict)
         and
@@ -360,6 +369,7 @@ def _run_actions_on_multiple_nodes(
     node_communicator, url, response_key, report_processor, create_start_report,
     actions, node_addresses_list, is_success,
     create_success_report, create_error_report, force_code, format_result,
+    skip_offline_nodes=False,
     allow_incomplete_distribution=False, description=""
 ):
     error_map = defaultdict(dict)
@@ -371,7 +381,7 @@ def _run_actions_on_multiple_nodes(
             report_processor,
             node_addresses,
             actions,
-            warn_on_communication_exception=allow_incomplete_distribution,
+            warn_on_communication_exception=skip_offline_nodes,
         )
         #If there was a communication error and --skip-offline is in effect, no
         #exception was raised. If there is no result cannot process it.
@@ -418,6 +428,7 @@ def _run_actions_on_multiple_nodes(
 
 def distribute_files(
     node_communicator, report_processor, file_definitions, node_addresses_list,
+    skip_offline_nodes=False,
     allow_incomplete_distribution=False, description=""
 ):
     """
@@ -455,12 +466,14 @@ def distribute_files(
         node_communication_format.get_format_result({
             "conflict": "File already exists",
         }),
+        skip_offline_nodes,
         allow_incomplete_distribution,
         description,
     )
 
 def remove_files(
     node_communicator, report_processor, file_definitions, node_addresses_list,
+    skip_offline_nodes=False,
     allow_incomplete_distribution=False, description=""
 ):
     _run_actions_on_multiple_nodes(
@@ -476,13 +489,16 @@ def remove_files(
         reports.file_remove_from_node_error,
         report_codes.SKIP_FILE_DISTRIBUTION_ERRORS,
         node_communication_format.get_format_result({}),
+        skip_offline_nodes,
         allow_incomplete_distribution,
         description,
     )
 
 def run_actions_on_multiple_nodes(
     node_communicator, report_processor, action_definitions, is_success,
-    node_addresses_list, allow_fails=False, description=""
+    node_addresses_list,
+    skip_offline_nodes=False,
+    allow_fails=False, description=""
 ):
     _run_actions_on_multiple_nodes(
         node_communicator,
@@ -499,6 +515,7 @@ def run_actions_on_multiple_nodes(
         node_communication_format.get_format_result({
             "fail": "Operation failed.",
         }),
+        skip_offline_nodes,
         allow_fails,
         description,
     )
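
The new skip_offline_nodes flag is threaded through to the per-node communication layer, so with --skip-offline a communication failure becomes a warning and the node is skipped. A hedged sketch of the node check, using only the signature shown above (node_communicator and node come from the caller's environment):

    from pcs.lib import nodes_task

    report_items = []
    # With warn_on_communication_exception=True an unreachable node yields
    # a warning report and the availability check is simply skipped.
    nodes_task.check_can_add_node_to_cluster(
        node_communicator, node, report_items,
        warn_on_communication_exception=True,
    )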
diff --git a/pcs/lib/pacemaker/state.py b/pcs/lib/pacemaker/state.py
index 71809db..be3e7ad 100644
--- a/pcs/lib/pacemaker/state.py
+++ b/pcs/lib/pacemaker/state.py
@@ -201,6 +201,25 @@ def _get_primitive_roles_with_nodes(primitive_el_list):
         for role, nodes in roles_with_nodes.items()
     ])
 
+def info_resource_state(cluster_state, resource_id):
+    roles_with_nodes = _get_primitive_roles_with_nodes(
+        _get_primitives_for_state_check(
+            cluster_state,
+            resource_id,
+            expected_running=True
+        )
+    )
+    if not roles_with_nodes:
+        return reports.resource_does_not_run(
+            resource_id,
+            severities.INFO
+        )
+    return reports.resource_running_on_nodes(
+        resource_id,
+        roles_with_nodes,
+        severities.INFO
+    )
+
 def ensure_resource_state(expected_running, cluster_state, resource_id):
     roles_with_nodes = _get_primitive_roles_with_nodes(
         _get_primitives_for_state_check(
@@ -244,18 +263,25 @@ def is_resource_managed(cluster_state, resource_id):
         for primitive in primitive_list:
             if is_false(primitive.attrib.get("managed", "")):
                 return False
-            clone = find_parent(primitive, ["clone"])
-            if clone is not None and is_false(clone.attrib.get("managed", "")):
+            parent = find_parent(primitive, ["clone", "bundle"])
+            if (
+                parent is not None
+                and
+                is_false(parent.attrib.get("managed", ""))
+            ):
                 return False
         return True
 
-    clone_list = cluster_state.xpath(
-        """.//clone[@id="{0}"]""".format(resource_id)
+    parent_list = cluster_state.xpath("""
+        .//clone[@id="{0}"]
+        |
+        .//bundle[@id="{0}"]
+        """.format(resource_id)
     )
-    for clone in clone_list:
-        if is_false(clone.attrib.get("managed", "")):
+    for parent in parent_list:
+        if is_false(parent.attrib.get("managed", "")):
             return False
-        for primitive in clone.xpath(".//resource"):
+        for primitive in parent.xpath(".//resource"):
             if is_false(primitive.attrib.get("managed", "")):
                 return False
         return True
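
With this change is_resource_managed also inspects a primitive's bundle parent, and the new info_resource_state reports the running state at INFO severity instead of raising an error. A small sketch, assuming cluster_state is a parsed cluster status document:

    from pcs.lib.pacemaker import state

    # False if the bundle itself or any resource inside it is unmanaged.
    managed = state.is_resource_managed(cluster_state, "A-bundle")

    # INFO-level report describing where the resource runs (or that it
    # does not run), as used by bundle update with --wait.
    report = state.info_resource_state(cluster_state, "A-bundle")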
diff --git a/pcs/lib/pacemaker/test/test_state.py b/pcs/lib/pacemaker/test/test_state.py
index a29eddf..5de9426 100644
--- a/pcs/lib/pacemaker/test/test_state.py
+++ b/pcs/lib/pacemaker/test/test_state.py
@@ -491,7 +491,7 @@ class GetPrimitivesForStateCheck(TestCase):
         self.assert_primitives("B2-R2", ["B2-R2", "B2-R2"], False)
 
 
-class EnsureResourceState(TestCase):
+class CommonResourceState(TestCase):
     resource_id = "R"
     def setUp(self):
         self.cluster_state = "state"
@@ -526,6 +526,8 @@ class EnsureResourceState(TestCase):
             "resource_id": self.resource_id
         })
 
+
+class EnsureResourceState(CommonResourceState):
     def assert_running_info_transform(self, run_info, report, expected_running):
         self.get_primitives_for_state_check.return_value = ["elem1", "elem2"]
         self.get_primitive_roles_with_nodes.return_value = run_info
@@ -575,6 +577,35 @@ class EnsureResourceState(TestCase):
         )
 
 
+class InfoResourceState(CommonResourceState):
+    def assert_running_info_transform(self, run_info, report):
+        self.get_primitives_for_state_check.return_value = ["elem1", "elem2"]
+        self.get_primitive_roles_with_nodes.return_value = run_info
+        assert_report_item_equal(
+            state.info_resource_state(self.cluster_state, self.resource_id),
+            report
+        )
+        self.get_primitives_for_state_check.assert_called_once_with(
+            self.cluster_state,
+            self.resource_id,
+            expected_running=True
+        )
+        self.get_primitive_roles_with_nodes.assert_called_once_with(
+            ["elem1", "elem2"]
+        )
+
+    def test_report_info_running(self):
+        self.assert_running_info_transform(
+            self.fixture_running_state_info(),
+            self.fixture_running_report(severities.INFO)
+        )
+    def test_report_info_not_running(self):
+        self.assert_running_info_transform(
+            [],
+            self.fixture_not_running_report(severities.INFO)
+        )
+
+
 class IsResourceManaged(TestCase):
     status_xml = etree.fromstring("""
         <resources>
@@ -733,6 +764,60 @@ class IsResourceManaged(TestCase):
                     <resource id="R38:1" managed="false" />
                 </group>
             </clone>
+
+            <bundle id="B1" managed="true" />
+            <bundle id="B2" managed="false" />
+
+            <bundle id="B3" managed="true">
+                <replica id="0">
+                    <resource id="R39" managed="true" />
+                    <resource id="R40" managed="true" />
+                </replica>
+                <replica id="1">
+                    <resource id="R39" managed="true" />
+                    <resource id="R40" managed="true" />
+                </replica>
+            </bundle>
+            <bundle id="B4" managed="false">
+                <replica id="0">
+                    <resource id="R41" managed="true" />
+                    <resource id="R42" managed="true" />
+                </replica>
+                <replica id="1">
+                    <resource id="R41" managed="true" />
+                    <resource id="R42" managed="true" />
+                </replica>
+            </bundle>
+            <bundle id="B5" managed="true">
+                <replica id="0">
+                    <resource id="R43" managed="false" />
+                    <resource id="R44" managed="true" />
+                </replica>
+                <replica id="1">
+                    <resource id="R43" managed="false" />
+                    <resource id="R44" managed="true" />
+                </replica>
+            </bundle>
+            <bundle id="B6" managed="true">
+                <replica id="0">
+                    <resource id="R45" managed="true" />
+                    <resource id="R46" managed="false" />
+                </replica>
+                <replica id="1">
+                    <resource id="R45" managed="true" />
+                    <resource id="R46" managed="false" />
+                </replica>
+            </bundle>
+            <bundle id="B7" managed="false">
+                <replica id="0">
+                    <resource id="R47" managed="false" />
+                    <resource id="R48" managed="false" />
+                </replica>
+                <replica id="1">
+                    <resource id="R47" managed="false" />
+                    <resource id="R48" managed="false" />
+                </replica>
+            </bundle>
         </resources>
     """)
 
@@ -856,3 +941,24 @@ class IsResourceManaged(TestCase):
         self.assert_managed("R36", False)
         self.assert_managed("R37", False)
         self.assert_managed("R38", False)
+
+    def test_bundle(self):
+        self.assert_managed("B1", True)
+        self.assert_managed("B2", False)
+        self.assert_managed("B3", True)
+        self.assert_managed("B4", False)
+        self.assert_managed("B5", False)
+        self.assert_managed("B6", False)
+        self.assert_managed("B7", False)
+
+    def test_primitive_in_bundle(self):
+        self.assert_managed("R39", True)
+        self.assert_managed("R40", True)
+        self.assert_managed("R41", False)
+        self.assert_managed("R42", False)
+        self.assert_managed("R43", False)
+        self.assert_managed("R44", True)
+        self.assert_managed("R45", True)
+        self.assert_managed("R46", False)
+        self.assert_managed("R47", False)
+        self.assert_managed("R48", False)
diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
index 64d7143..bda17ca 100644
--- a/pcs/lib/reports.py
+++ b/pcs/lib/reports.py
@@ -280,6 +280,30 @@ def invalid_option_value(
         forceable=forceable
     )
 
+def deprecated_option(
+    option_name, replaced_by_options, option_type,
+    severity=ReportItemSeverity.ERROR, forceable=None
+):
+    """
+    Specified option name is deprecated and has been replaced by other option(s)
+
+    string option_name -- the deprecated option
+    iterable or string replaced_by_options -- new option(s) to be used instead
+    string option_type -- option description
+    string severity -- report item severity
+    string forceable -- a category by which the report is forceable
+    """
+    return ReportItem(
+        report_codes.DEPRECATED_OPTION,
+        severity,
+        info={
+            "option_name": option_name,
+            "option_type": option_type,
+            "replaced_by": sorted(replaced_by_options),
+        },
+        forceable=forceable
+    )
+
 def mutually_exclusive_options(option_names, option_type):
     """
     entered options can not coexist
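
A hedged example of the new report builder, mirroring how StonithAgent uses it further below in this patch:

    from pcs.common import report_codes
    from pcs.lib import reports
    from pcs.lib.errors import ReportItemSeverity

    # "action" is deprecated for stonith resources; as an ERROR the report
    # is forceable via FORCE_OPTIONS, as a WARNING it is not.
    report = reports.deprecated_option(
        "action",
        ["pcmk_off_action", "pcmk_reboot_action"],
        "stonith",
        severity=ReportItemSeverity.ERROR,
        forceable=report_codes.FORCE_OPTIONS,
    )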
diff --git a/pcs/lib/resource_agent.py b/pcs/lib/resource_agent.py
index fb3cf0b..ddd1058 100644
--- a/pcs/lib/resource_agent.py
+++ b/pcs/lib/resource_agent.py
@@ -5,11 +5,10 @@ from __future__ import (
     unicode_literals,
 )
 
-import os
-import re
 from collections import namedtuple
-
 from lxml import etree
+import os
+import re
 
 from pcs import settings
 from pcs.common import report_codes
@@ -52,6 +51,8 @@ DEFAULT_INTERVALS = {
     "monitor": "60s"
 }
 
+_STONITH_ACTION_REPLACED_BY = ("pcmk_off_action", "pcmk_reboot_action")
+
 
 def get_default_interval(operation_name):
     """
@@ -464,7 +465,7 @@ class Agent(object):
             value_type = content_element.get("type", value_type)
             default_value = content_element.get("default", default_value)
 
-        return {
+        return self._create_parameter({
             "name": parameter_element.get("name", ""),
             "longdesc": self._get_text_from_dom_element(
                 parameter_element.find("longdesc")
@@ -478,12 +479,13 @@ class Agent(object):
             "advanced": False,
             "deprecated": is_true(parameter_element.get("deprecated", "0")),
             "obsoletes": parameter_element.get("obsoletes", None),
-        }
+        })
 
     def validate_parameters(
         self, parameters,
-        parameters_type="resource agent parameter",
-        allow_invalid=False
+        parameters_type="resource",
+        allow_invalid=False,
+        update=False
     ):
         forceable = report_codes.FORCE_OPTIONS if not allow_invalid else None
         severity = (
@@ -505,7 +507,7 @@ class Agent(object):
                 forceable=forceable,
             ))
 
-        if missing_req_opts:
+        if not update and missing_req_opts:
             report_list.append(reports.required_option_is_missing(
                 missing_req_opts,
                 parameters_type,
@@ -620,6 +622,22 @@ class Agent(object):
             return ""
         return element.text.strip()
 
+    def _create_parameter(self, properties):
+        new_param = {
+            "name": "",
+            "longdesc": "",
+            "shortdesc": "",
+            "type": "string",
+            "default": None,
+            "required": False,
+            "advanced": False,
+            "deprecated": False,
+            "obsoletes": None,
+            "pcs_deprecated_warning": "",
+        }
+        new_param.update(properties)
+        return new_param
+
 
 class FakeAgentMetadata(Agent):
     #pylint:disable=abstract-method
@@ -755,7 +773,7 @@ class ResourceAgent(CrmAgent):
                     "Set to 1 to turn on resource agent tracing"
                     " (expect large output)"
                 )
-                parameters.append({
+                parameters.append(self._create_parameter({
                     "name": "trace_ra",
                     "longdesc": (
                         shortdesc
@@ -771,12 +789,12 @@ class ResourceAgent(CrmAgent):
                     "default": 0,
                     "required": False,
                     "advanced": True,
-                })
+                }))
             if not trace_file_found:
                 shortdesc = (
                     "Path to a file to store resource agent tracing log"
                 )
-                parameters.append({
+                parameters.append(self._create_parameter({
                     "name": "trace_file",
                     "longdesc": shortdesc,
                     "shortdesc": shortdesc,
@@ -784,7 +802,7 @@ class ResourceAgent(CrmAgent):
                     "default": "",
                     "required": False,
                     "advanced": True,
-                })
+                }))
 
         return parameters
 
@@ -827,6 +845,33 @@ class StonithAgent(CrmAgent):
             self._get_stonithd_metadata().get_parameters()
         )
 
+    def validate_parameters(
+        self, parameters,
+        parameters_type="stonith",
+        allow_invalid=False,
+        update=False
+    ):
+        report_list = super(StonithAgent, self).validate_parameters(
+            parameters,
+            parameters_type=parameters_type,
+            allow_invalid=allow_invalid,
+            update=update
+        )
+        if parameters.get("action", ""):
+            report_list.append(reports.deprecated_option(
+                "action",
+                _STONITH_ACTION_REPLACED_BY,
+                parameters_type,
+                severity=(
+                    ReportItemSeverity.ERROR if not allow_invalid
+                    else ReportItemSeverity.WARNING
+                ),
+                forceable=(
+                    report_codes.FORCE_OPTIONS if not allow_invalid else None
+                )
+            ))
+        return report_list
+
     def _filter_parameters(self, parameters):
         """
         Remove parameters that should not be available to the user.
@@ -834,9 +879,7 @@ class StonithAgent(CrmAgent):
         # We don't allow the user to change these options which are only
         # intended to be used interactively on command line.
         remove_parameters = frozenset([
-            "debug",
             "help",
-            "verbose",
             "version",
         ])
         filtered = []
@@ -846,15 +889,19 @@ class StonithAgent(CrmAgent):
             elif param["name"] == "action":
                 # However we still need the user to be able to set 'action' due
                 # to backward compatibility reasons. So we just mark it as not
-                # required.
+                # required. We also move it to advanced params to indicate users
+                # should not set it in most cases.
                 new_param = dict(param)
-                new_param["shortdesc"] = "\n".join(filter(None, [
-                    param.get("shortdesc", ""),
-                    "WARNING: specifying 'action' is deprecated and not "
-                        "necessary with current Pacemaker versions."
-                    ,
-                ]))
                 new_param["required"] = False
+                new_param["advanced"] = True
+                new_param["pcs_deprecated_warning"] = (
+                    "Specifying 'action' is deprecated and not necessary with"
+                        " current Pacemaker versions. Use {0} instead."
+                ).format(
+                    ", ".join(
+                        ["'{0}'".format(x) for x in _STONITH_ACTION_REPLACED_BY]
+                    )
+                )
                 filtered.append(new_param)
             else:
                 filtered.append(param)
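
Parameter dictionaries are now built through the _create_parameter helper, so every entry carries the full key set (including the new pcs_deprecated_warning) even when the agent metadata omits them. A sketch of the effect, using a hypothetical parameter name and an existing agent instance:

    # "ip" is illustrative only; missing keys are filled with the defaults
    # defined in _create_parameter above.
    param = agent._create_parameter({"name": "ip", "required": True})
    assert param["type"] == "string"
    assert param["pcs_deprecated_warning"] == ""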
diff --git a/pcs/lib/test/test_nodes_task.py b/pcs/lib/test/test_nodes_task.py
index 61ba132..5459337 100644
--- a/pcs/lib/test/test_nodes_task.py
+++ b/pcs/lib/test/test_nodes_task.py
@@ -790,10 +790,6 @@ class CheckCanAddNodeToCluster(TestCase):
     def test_report_no_dict_in_json_response(self):
         self.assert_result_causes_invalid_format("bad answer")
 
-    def test_report_dict_without_mandatory_key(self):
-        self.assert_result_causes_invalid_format({})
-
-
 class OnNodeTest(TestCase):
     def setUp(self):
         self.reporter = MockLibraryReportProcessor()
diff --git a/pcs/lib/test/test_resource_agent.py b/pcs/lib/test/test_resource_agent.py
index d821f4d..1df618b 100644
--- a/pcs/lib/test/test_resource_agent.py
+++ b/pcs/lib/test/test_resource_agent.py
@@ -791,6 +791,7 @@ class AgentMetadataGetParametersTest(TestCase):
                     "advanced": False,
                     "deprecated": False,
                     "obsoletes": None,
+                    "pcs_deprecated_warning": "",
                 }
             ]
         )
@@ -824,6 +825,7 @@ class AgentMetadataGetParametersTest(TestCase):
                     "advanced": False,
                     "deprecated": False,
                     "obsoletes": None,
+                    "pcs_deprecated_warning": "",
                 },
                 {
                     "name": "another parameter",
@@ -835,6 +837,7 @@ class AgentMetadataGetParametersTest(TestCase):
                     "advanced": False,
                     "deprecated": False,
                     "obsoletes": None,
+                    "pcs_deprecated_warning": "",
                 }
             ]
         )
@@ -862,6 +865,7 @@ class AgentMetadataGetParametersTest(TestCase):
                     "advanced": False,
                     "deprecated": True,
                     "obsoletes": None,
+                    "pcs_deprecated_warning": "",
                 },
             ]
         )
@@ -1101,6 +1105,7 @@ class AgentMetadataGetInfoTest(TestCase):
                         "advanced": False,
                         "deprecated": False,
                         "obsoletes": None,
+                        "pcs_deprecated_warning": "",
                     },
                     {
                         "name": "another parameter",
@@ -1112,6 +1117,7 @@ class AgentMetadataGetInfoTest(TestCase):
                         "advanced": False,
                         "deprecated": False,
                         "obsoletes": None,
+                        "pcs_deprecated_warning": "",
                     }
                 ],
                 "actions": [
@@ -1295,7 +1301,7 @@ class AgentMetadataValidateParameters(TestCase):
                     report_codes.INVALID_OPTION,
                     {
                         "option_names": ["invalid_param"],
-                        "option_type": "resource agent parameter",
+                        "option_type": "resource",
                         "allowed": [
                             "another_required_param",
                             "required_param",
@@ -1319,7 +1325,7 @@ class AgentMetadataValidateParameters(TestCase):
                             "required_param",
                             "another_required_param",
                         ],
-                        "option_type": "resource agent parameter",
+                        "option_type": "resource",
                     },
                     report_codes.FORCE_OPTIONS
                 ),
@@ -1338,7 +1344,7 @@ class AgentMetadataValidateParameters(TestCase):
                             "required_param",
                             "another_required_param",
                         ],
-                        "option_type": "resource agent parameter",
+                        "option_type": "resource",
                     },
                 ),
             ]
@@ -1366,7 +1372,7 @@ class AgentMetadataValidateParameters(TestCase):
                         "option_names": [
                             "deprecated",
                         ],
-                        "option_type": "resource agent parameter",
+                        "option_type": "resource",
                     },
                     report_codes.FORCE_OPTIONS
                 ),
@@ -1395,7 +1401,7 @@ class AgentMetadataValidateParameters(TestCase):
                         "option_names": [
                             "deprecated",
                         ],
-                        "option_type": "resource agent parameter",
+                        "option_type": "resource",
                     },
                     report_codes.FORCE_OPTIONS
                 ),
@@ -1404,7 +1410,7 @@ class AgentMetadataValidateParameters(TestCase):
                     report_codes.INVALID_OPTION,
                     {
                         "option_names": ["obsoletes"],
-                        "option_type": "resource agent parameter",
+                        "option_type": "resource",
                         "allowed": [
                             "deprecated",
                         ]
@@ -1414,6 +1420,15 @@ class AgentMetadataValidateParameters(TestCase):
             ]
         )
 
+    def test_required_not_specified_on_update(self):
+        assert_report_item_list_equal(
+            self.agent.validate_parameters({
+                "test_param": "value",
+            }, update=True),
+            [
+            ],
+        )
+
 
 class StonithdMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
     def setUp(self):
@@ -1510,6 +1525,7 @@ class StonithdMetadataGetParametersTest(TestCase):
                     "advanced": True,
                     "deprecated": False,
                     "obsoletes": None,
+                    "pcs_deprecated_warning": "",
                 },
                 {
                     "name": "another parameter",
@@ -1521,6 +1537,7 @@ class StonithdMetadataGetParametersTest(TestCase):
                     "advanced": False,
                     "deprecated": False,
                     "obsoletes": None,
+                    "pcs_deprecated_warning": "",
                 }
             ]
         )
@@ -1724,6 +1741,18 @@ class StonithAgentMetadataGetParametersTest(TestCase):
             self.agent.get_parameters(),
             [
                 {
+                    "name": "debug",
+                    "longdesc": "",
+                    "shortdesc": "",
+                    "type": "string",
+                    "required": False,
+                    "default": None,
+                    "advanced": False,
+                    "deprecated": False,
+                    "obsoletes": None,
+                    "pcs_deprecated_warning": "",
+                },
+                {
                     "name": "valid_param",
                     "longdesc": "",
                     "shortdesc": "",
@@ -1733,21 +1762,35 @@ class StonithAgentMetadataGetParametersTest(TestCase):
                     "advanced": False,
                     "deprecated": False,
                     "obsoletes": None,
+                    "pcs_deprecated_warning": "",
                 },
                 {
-                    "name": "action",
+                    "name": "verbose",
                     "longdesc": "",
-                    "shortdesc":
-                        "Fencing Action\nWARNING: specifying 'action' is"
-                        " deprecated and not necessary with current Pacemaker"
-                        " versions."
-                    ,
+                    "shortdesc": "",
                     "type": "string",
                     "required": False,
                     "default": None,
                     "advanced": False,
                     "deprecated": False,
                     "obsoletes": None,
+                    "pcs_deprecated_warning": "",
+                },
+                {
+                    "name": "action",
+                    "longdesc": "",
+                    "shortdesc": "Fencing Action",
+                    "type": "string",
+                    "required": False,
+                    "default": None,
+                    "advanced": True,
+                    "deprecated": False,
+                    "obsoletes": None,
+                    "pcs_deprecated_warning": "Specifying 'action' is"
+                        " deprecated and not necessary with current Pacemaker"
+                        " versions. Use 'pcmk_off_action',"
+                        " 'pcmk_reboot_action' instead."
+                    ,
                 },
                 {
                     "name": "another_param",
@@ -1759,6 +1802,7 @@ class StonithAgentMetadataGetParametersTest(TestCase):
                     "advanced": False,
                     "deprecated": False,
                     "obsoletes": None,
+                    "pcs_deprecated_warning": "",
                 },
                 {
                     "name": "stonithd_param",
@@ -1770,6 +1814,7 @@ class StonithAgentMetadataGetParametersTest(TestCase):
                     "advanced": False,
                     "deprecated": False,
                     "obsoletes": None,
+                    "pcs_deprecated_warning": "",
                 },
             ]
         )
diff --git a/pcs/lib/tools.py b/pcs/lib/tools.py
index cd2d7f9..b9d7505 100644
--- a/pcs/lib/tools.py
+++ b/pcs/lib/tools.py
@@ -9,7 +9,10 @@ import os
 
 
 def generate_key(random_bytes_count=32):
-    return binascii.hexlify(os.urandom(random_bytes_count))
+    return binascii.hexlify(generate_binary_key(random_bytes_count))
+
+def generate_binary_key(random_bytes_count):
+    return os.urandom(random_bytes_count)
 
 def environment_file_to_dict(config):
     """
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 4edfc72..f1b85ba 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "May 2017" "pcs 0.9.158" "System Administration Utilities"
+.TH PCS "8" "June 2017" "pcs 0.9.159" "System Administration Utilities"
 .SH NAME
 pcs \- pacemaker/corosync configuration system
 .SH SYNOPSIS
@@ -76,8 +76,8 @@ Show list of all available resource agents (if filter is provided then only reso
 describe [<standard>:[<provider>:]]<type> [\fB\-\-full\fR]
 Show options for the specified resource. If \fB\-\-full\fR is specified, all options including advanced ones are shown.
 .TP
-create <resource id> [<standard>:[<provider>:]]<type> [resource options] [\fBop\fR <operation action> <operation options> [<operation action> <operation options>]...] [\fBmeta\fR <meta options>...] [\fBclone\fR [<clone options>] | \fBmaster\fR [<master options>] | \fB\-\-group\fR <group id> [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>] | \fBbundle\fR <bundle id>] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
-Create specified resource. If \fBclone\fR is used a clone resource is created. If \fBmaster\fR is specified a master/slave resource is created. If \fB\-\-group\fR is specified the resource is added to the group named. You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group. If \fBbundle\fR is specified, resource will be created inside of the specified bundle. If \fB\-\-disabled\fR is specified [...]
+create <resource id> [<standard>:[<provider>:]]<type> [resource options] [\fBop\fR <operation action> <operation options> [<operation action> <operation options>]...] [\fBmeta\fR <meta options>...] [\fBclone\fR [<clone options>] | \fBmaster\fR [<master options>] | \fB\-\-group\fR <group id> [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>] | \fBbundle\fR <bundle id>] [\fB\-\-disabled\fR] [\fB\-\-no\-default\-ops\fR] [\fB\-\-wait\fR[=n]]
+Create specified resource. If \fBclone\fR is used a clone resource is created. If \fBmaster\fR is specified a master/slave resource is created. If \fB\-\-group\fR is specified the resource is added to the group named. You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group. If \fBbundle\fR is specified, resource will be created inside of the specified bundle. If \fB\-\-disabled\fR is specified [...]
 
 Example: Create a new resource called 'VirtualIP' with IP address 192.168.0.99, netmask of 32, monitored everything 30 seconds, on eth2: pcs resource create VirtualIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 nic=eth2 op monitor interval=30s
 .TP
@@ -139,7 +139,7 @@ op remove <operation id>
 Remove the specified operation id.
 .TP
 op defaults [options]
-Set default values for operations, if no options are passed, lists currently configured defaults.
+Set default values for operations, if no options are passed, lists currently configured defaults. Defaults do not apply to resources which override them with their own defined operations.
 .TP
 meta <resource id | group id | master id | clone id> <meta options> [\fB\-\-wait\fR[=n]]
 Add specified options to the specified resource, group, master/slave or clone.  Meta options should be in the format of name=value, options may be removed by setting an option without a value.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the changes to take effect and then return 0 if the changes have been processed or 1 otherwise.  If 'n' is not specified it defaults to 60 minutes.  Example: pcs resource meta TestResource failure\-timeout=50 stickiness=
@@ -162,10 +162,10 @@ Remove the clone which contains the specified group or resource (the resource or
 master [<master/slave id>] <resource id | group id> [options] [\fB\-\-wait\fR[=n]]
 Configure a resource or group as a multi\-state (master/slave) resource.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting and promoting resource instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.  Note: to remove a master you must remove the resource/group it contains.
 .TP
-bundle create <bundle id> [container [<container type>] <container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [\fB\-\-wait\fR[=n]]
-Create a new bundle encapsulating no resources. The bundle can be used either as it is or a resource may be put into it at any time. If the container type is not specified, it defaults to 'docker'.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the bundle to start and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
+bundle create <bundle id> container <container type> [<container options>] [network <network options>] [port\-map <port options>]... [storage\-map <storage options>]... [meta <meta options>] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
+Create a new bundle encapsulating no resources. The bundle can be used as it is, or a resource may be put into it at any time. If \fB\-\-disabled\fR is specified, the bundle is not started automatically. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the bundle to start and then return 0 on success or 1 on error. If 'n' is not specified it defaults to 60 minutes.
 .TP
-bundle update <bundle id> [container <container options>] [network <network options>] [port\-map (add <port options>) | (remove <id>...)]... [storage\-map (add <storage options>) | (remove <id>...)]... [\fB\-\-wait\fR[=n]]
+bundle update <bundle id> [container <container options>] [network <network options>] [port\-map (add <port options>) | (remove <id>...)]... [storage\-map (add <storage options>) | (remove <id>...)]... [meta <meta options>] [\fB\-\-wait\fR[=n]]
 Add, remove or change options to specified bundle. If you wish to update a resource encapsulated in the bundle, use the 'pcs resource update' command instead and specify the resource id.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 manage <resource id>... [\fB\-\-monitor\fR]
@@ -175,7 +175,7 @@ unmanage <resource id>... [\fB\-\-monitor\fR]
 Set resources listed to unmanaged mode. When a resource is in unmanaged mode, the cluster is not allowed to start nor stop the resource. If \fB\-\-monitor\fR is specified, disable all monitor operations of the resources.
 .TP
 defaults [options]
-Set default values for resources, if no options are passed, lists currently configured defaults.
+Set default values for resources. If no options are passed, lists currently configured defaults. Defaults do not apply to resources which override them with their own defined values.
 .TP
 cleanup [<resource id>] [\fB\-\-node\fR <node>]
 Make the cluster forget the operation history of the resource and re\-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a resource id is not specified then all resources/stonith devices will be cleaned up. If a node is not specified then resources/stonith devices on all nodes will be cleaned up.
@@ -205,7 +205,7 @@ Add specified utilization options to specified resource. If resource is not spec
 auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-force\fR] [\fB\-\-local\fR]
 Authenticate pcs to pcsd on nodes specified, or on all nodes configured in the local cluster if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root). By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other). Using \fB\-\-force\fR forces re\-authentication to occur.
 .TP
-setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1\-altaddr]> [<node2[,node2\-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] |  [...]
+setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1\-altaddr]> [<node2[,node2\-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] |  [...]
 Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-wait\fR will wait up to 'n' seconds for the nodes to start, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for CMAN clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the syste [...]
 
 \fB\-\-ipv6\fR will configure corosync to use ipv6 (instead of ipv4).  This option is not supported on CMAN clusters.
@@ -222,6 +222,8 @@ Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR w
 
 \fB\-\-fail_recv_const\fR <failures> specifies how many rotations of the token without receiving any messages when messages should be received may occur before a new configuration is formed (default 2500 failures)
 
+\fB\-\-encryption\fR 0|1 disables (0) or enables (1) corosync communication encryption (default 0)
+
 
 Configuring Redundant Ring Protocol (RRP)
 
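As a quick illustration of the syntax documented above, the commands below sketch how the new explicit container type, bundle meta attributes, --disabled flag, user-supplied operation ids and the --encryption option might be invoked. The bundle, resource, cluster and node names (B1, R, cname, node1, node2) and the container image are placeholders, not values taken from this patch:

    pcs resource bundle create B1 container docker image=pcs:test --disabled
    pcs resource bundle update B1 meta priority=10 is-managed=false
    pcs resource create R ocf:heartbeat:Dummy op monitor interval=30s id=R-monitor-custom
    pcs cluster setup --name cname node1 node2 --encryption=1
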
diff --git a/pcs/pcsd.py b/pcs/pcsd.py
index 629b4c0..7f7c660 100644
--- a/pcs/pcsd.py
+++ b/pcs/pcsd.py
@@ -79,7 +79,7 @@ def pcsd_certkey(argv):
 
     print("Certificate and key updated, you may need to restart pcsd (service pcsd restart) for new settings to take effect")
 
-def pcsd_sync_certs(argv, exit_after_error=True):
+def pcsd_sync_certs(argv, exit_after_error=True, async_restart=False):
     error = False
     nodes_sync = argv if argv else utils.getNodesFromCorosyncConf()
     nodes_restart = []
@@ -117,7 +117,9 @@ def pcsd_sync_certs(argv, exit_after_error=True):
         return
 
     print("Restarting pcsd on the nodes in order to reload the certificates...")
-    pcsd_restart_nodes(nodes_restart, exit_after_error)
+    pcsd_restart_nodes(
+        nodes_restart, exit_after_error, async_restart=async_restart
+    )
 
 def pcsd_clear_auth(argv):
     output = []
@@ -148,7 +150,7 @@ def pcsd_clear_auth(argv):
             print("Error: " + o)
         sys.exit(1)
 
-def pcsd_restart_nodes(nodes, exit_after_error=True):
+def pcsd_restart_nodes(nodes, exit_after_error=True, async_restart=False):
     pcsd_data = {
         "nodes": nodes,
     }
@@ -188,6 +190,10 @@ def pcsd_restart_nodes(nodes, exit_after_error=True):
         utils.err("Unable to restart pcsd", exit_after_error)
         return
 
+    if async_restart:
+        print("Not waiting for restart of pcsd on all nodes.")
+        return
+
     # check if the restart was performed already
     error = False
     for _ in range(5):
diff --git a/pcs/resource.py b/pcs/resource.py
index 4d5f43a..dd864b6 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -20,7 +20,7 @@ from pcs import (
 )
 from pcs.settings import pacemaker_wait_timeout_status as \
     PACEMAKER_WAIT_TIMEOUT_STATUS
-import pcs.lib.cib.acl as lib_acl
+from pcs.cli.common.console_report import error, warn
 from pcs.cli.common.errors import CmdLineInputError
 from pcs.cli.common.parse_args import prepare_options
 from pcs.cli.resource.parse_args import (
@@ -28,24 +28,36 @@ from pcs.cli.resource.parse_args import (
     parse_bundle_update_options,
     parse_create as parse_create_args,
 )
-from pcs.lib.env_tools import get_nodes
-from pcs.lib.errors import LibraryError
+import pcs.lib.cib.acl as lib_acl
+from pcs.lib.cib.resource import guest_node
+from pcs.lib.commands.resource import(
+    _validate_guest_change,
+    _get_nodes_to_validate_against,
+)
+from pcs.lib.errors import LibraryError, ReportItemSeverity
 import pcs.lib.pacemaker.live as lib_pacemaker
+from pcs.lib.pacemaker.state import (
+    get_cluster_state_dom,
+    _get_primitive_roles_with_nodes,
+    _get_primitives_for_state_check,
+)
 from pcs.lib.pacemaker.values import timeout_to_seconds
 import pcs.lib.resource_agent as lib_ra
-from pcs.cli.common.console_report import error, warn
-from pcs.lib.commands.resource import _validate_guest_change
 
 
 RESOURCE_RELOCATE_CONSTRAINT_PREFIX = "pcs-relocate-"
 
 def _detect_guest_change(meta_attributes, allow_not_suitable_command):
+    if not guest_node.is_node_name_in_options(meta_attributes):
+        return
+
     env = utils.get_lib_env()
     cib = env.get_cib()
+    nodes_to_validate_against = _get_nodes_to_validate_against(env, cib)
     env.report_processor.process_list(
         _validate_guest_change(
             cib,
-            get_nodes(env.get_corosync_conf(), cib),
+            nodes_to_validate_against,
             meta_attributes,
             allow_not_suitable_command,
             detect_remove=True,
@@ -302,6 +314,8 @@ def _format_agent_description(description, stonith=False, show_advanced=False):
                 param_desc = param.get("shortdesc", "").replace("\n", " ")
                 if not param_desc:
                     param_desc = "No description available"
+            if param.get("pcs_deprecated_warning"):
+                param_desc += " WARNING: " + param["pcs_deprecated_warning"]
             output_params.append("  {0}: {1}".format(
                 param_title,
                 _format_desc(len(param_title) + 4, param_desc)
@@ -730,37 +744,36 @@ def resource_update(res_id,args, deal_with_guest_change=True):
         instance_attributes = instance_attributes[0]
 
     params = utils.convert_args_to_tuples(ra_values)
-    if "--force" not in utils.pcs_options and (resource.getAttribute("class") == "ocf" or resource.getAttribute("class") == "stonith"):
-        resClass = resource.getAttribute("class")
-        resProvider = resource.getAttribute("provider")
-        resType = resource.getAttribute("type")
-        if resProvider == "":
-            resource_type = resClass + ":" + resType
+
+    resClass = resource.getAttribute("class")
+    resProvider = resource.getAttribute("provider")
+    resType = resource.getAttribute("type")
+    try:
+        if resClass == "stonith":
+            metadata = lib_ra.StonithAgent(utils.cmd_runner(), resType)
         else:
-            resource_type = resClass + ":" + resProvider + ":" + resType
-        bad_opts = []
-        try:
-            if resource_type.startswith("stonith:"):
-                metadata = lib_ra.StonithAgent(
-                    utils.cmd_runner(),
-                    resource_type[len("stonith:"):]
-                )
-            else:
-                metadata = lib_ra.ResourceAgent(
-                    utils.cmd_runner(),
-                    resource_type
-                )
-            bad_opts, _ = metadata.validate_parameters_values(dict(params))
-        except lib_ra.ResourceAgentError as e:
-            utils.process_library_reports(
-                [lib_ra.resource_agent_error_to_report_item(e)]
+            metadata = lib_ra.ResourceAgent(
+                utils.cmd_runner(),
+                lib_ra.ResourceAgentName(
+                    resClass, resProvider, resType
+                ).full_name
             )
-        except LibraryError as e:
-            utils.process_library_reports(e.args)
-        if len(bad_opts) != 0:
-            utils.err ("resource option(s): '%s', are not recognized for resource type: '%s' (use --force to override)" \
-                    % (", ".join(sorted(bad_opts)), utils.getResourceType(resource)))
-
+        report_list = metadata.validate_parameters(
+            dict(params),
+            allow_invalid=("--force" in utils.pcs_options),
+            update=True
+        )
+        utils.process_library_reports(report_list)
+    except lib_ra.ResourceAgentError as e:
+        severity = (
+            ReportItemSeverity.WARNING if "--force" in utils.pcs_options
+            else ReportItemSeverity.ERROR
+        )
+        utils.process_library_reports(
+            [lib_ra.resource_agent_error_to_report_item(e, severity)]
+        )
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
 
     for (key,val) in params:
         ia_found = False
@@ -940,8 +953,23 @@ def resource_operation_add(
     op_properties.sort(key=lambda a:a[0])
     op_properties.insert(0, ("name", op_name))
 
-    op_id = "%s-%s-interval-%s" % (res_id, op_name, interval)
-    op_id = utils.find_unique_id(dom, op_id)
+    generate_id = True
+    for name, value in op_properties:
+        if name == "id":
+            op_id = value
+            generate_id = False
+            id_valid, id_error = utils.validate_xml_id(value, 'operation id')
+            if not id_valid:
+                utils.err(id_error)
+            if utils.does_id_exist(dom, value):
+                utils.err(
+                    "id '%s' is already in use, please specify another one"
+                    % value
+                )
+    if generate_id:
+        op_id = "%s-%s-interval-%s" % (res_id, op_name, interval)
+        op_id = utils.find_unique_id(dom, op_id)
+
     op_el = dom.createElement("op")
     op_el.setAttribute("id", op_id)
     for key, val in op_properties:
@@ -1425,6 +1453,18 @@ def resource_master_create(dom, argv, update=False, master_id=None):
     return dom, master_element.getAttribute("id")
 
 def resource_remove(resource_id, output=True, is_remove_remote_context=False):
+    def is_bundle_running(bundle_id):
+        roles_with_nodes = _get_primitive_roles_with_nodes(
+            _get_primitives_for_state_check(
+                get_cluster_state_dom(
+                    lib_pacemaker.get_cluster_status_xml(utils.cmd_runner())
+                ),
+                bundle_id,
+                expected_running=True
+            )
+        )
+        return True if roles_with_nodes else False
+
     dom = utils.get_cib_dom()
     # if resource is a clone or a master, work with its child instead
     cloned_resource = utils.dom_get_clone_ms_resource(dom, resource_id)
@@ -1434,6 +1474,40 @@ def resource_remove(resource_id, output=True, is_remove_remote_context=False):
     bundle = utils.dom_get_bundle(dom, resource_id)
     if bundle is not None:
         primitive_el = utils.dom_get_resource_bundle(bundle)
+        if primitive_el is None:
+            print("Deleting bundle '{0}'".format(resource_id))
+        else:
+            print(
+                "Deleting bundle '{0}' and its inner resource '{1}'".format(
+                    resource_id,
+                    primitive_el.getAttribute("id")
+                )
+            )
+
+        if (
+            "--force" not in utils.pcs_options
+            and
+            not utils.usefile
+            and
+            is_bundle_running(resource_id)
+        ):
+            sys.stdout.write("Stopping bundle '{0}'... ".format(resource_id))
+            sys.stdout.flush()
+            lib = utils.get_library_wrapper()
+            lib.resource.disable([resource_id], False)
+            output, retval = utils.run(["crm_resource", "--wait"])
+            # pacemaker which supports bundles supports --wait as well
+            if is_bundle_running(resource_id):
+                msg = [
+                    "Unable to stop: %s before deleting "
+                    "(re-run with --force to force deletion)"
+                    % resource_id
+                ]
+                if retval != 0 and output:
+                    msg.append("\n" + output)
+                utils.err("\n".join(msg).strip())
+            print("Stopped")
+
         if primitive_el is not None:
             resource_remove(primitive_el.getAttribute("id"))
         utils.replace_cib_configuration(
@@ -1491,7 +1565,7 @@ def resource_remove(resource_id, output=True, is_remove_remote_context=False):
             resource_remove(res.getAttribute("id"))
         sys.exit(0)
 
-    # now we know resource is not a group, a clone nor a master
+    # now we know resource is not a group, a clone, a master nor a bundle
     # because of the conditions above
     if not utils.does_exist('//resources/descendant::primitive[@id="'+resource_id+'"]'):
         utils.err("Resource '{0}' does not exist.".format(resource_id))
@@ -1510,7 +1584,7 @@ def resource_remove(resource_id, output=True, is_remove_remote_context=False):
         and
         utils.resource_running_on(resource_id)["is_running"]
     ):
-        sys.stdout.write("Attempting to stop: "+ resource_id + "...")
+        sys.stdout.write("Attempting to stop: "+ resource_id + "... ")
         sys.stdout.flush()
         lib = utils.get_library_wrapper()
         # we are not using wait from disable command, because if wait is not
@@ -2185,6 +2259,10 @@ def show_defaults(def_type, indent=""):
         print(indent + "No defaults set")
 
 def set_default(def_type, argv):
+    warn(
+        "Defaults do not apply to resources which override them with their "
+        "own defined values"
+    )
     for arg in argv:
         args = arg.split('=')
         if (len(args) != 2):
@@ -2239,6 +2317,7 @@ def print_node(node, tab = 0):
             node.findall("storage/storage-mapping"),
             spaces + " "
         )
+        print_meta_vars_string(node, spaces)
         for child in node:
             print_node(child, tab + 1)
         return
@@ -2668,12 +2747,14 @@ def resource_bundle_create_cmd(lib, argv, modifiers):
     lib.resource.bundle_create(
         bundle_id,
         parts["container_type"],
-        parts["container"],
-        parts["network"],
-        parts["port_map"],
-        parts["storage_map"],
-        modifiers["force"],
-        modifiers["wait"]
+        container_options=parts["container"],
+        network_options=parts["network"],
+        port_map=parts["port_map"],
+        storage_map=parts["storage_map"],
+        meta_attributes=parts["meta"],
+        force_options=modifiers["force"],
+        ensure_disabled=modifiers["disabled"],
+        wait=modifiers["wait"]
     )
 
 def resource_bundle_update_cmd(lib, argv, modifiers):
@@ -2684,12 +2765,13 @@ def resource_bundle_update_cmd(lib, argv, modifiers):
     parts = parse_bundle_update_options(argv[1:])
     lib.resource.bundle_update(
         bundle_id,
-        parts["container"],
-        parts["network"],
-        parts["port_map_add"],
-        parts["port_map_remove"],
-        parts["storage_map_add"],
-        parts["storage_map_remove"],
-        modifiers["force"],
-        modifiers["wait"]
+        container_options=parts["container"],
+        network_options=parts["network"],
+        port_map_add=parts["port_map_add"],
+        port_map_remove=parts["port_map_remove"],
+        storage_map_add=parts["storage_map_add"],
+        storage_map_remove=parts["storage_map_remove"],
+        meta_attributes=parts["meta"],
+        force_options=modifiers["force"],
+        wait=modifiers["wait"]
     )
diff --git a/pcs/settings_default.py b/pcs/settings_default.py
index 72c91f4..d1ac3c9 100644
--- a/pcs/settings_default.py
+++ b/pcs/settings_default.py
@@ -27,7 +27,7 @@ pengine_binary = "/usr/libexec/pacemaker/pengine"
 crmd_binary = "/usr/libexec/pacemaker/crmd"
 cib_binary = "/usr/libexec/pacemaker/cib"
 stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.158"
+pcs_version = "0.9.159"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
 crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
diff --git a/pcs/status.py b/pcs/status.py
index d6ade5a..33bf1d1 100644
--- a/pcs/status.py
+++ b/pcs/status.py
@@ -18,6 +18,9 @@ from pcs.quorum import quorum_status_cmd
 from pcs.cli.common.errors import CmdLineInputError
 from pcs.lib.errors import LibraryError
 from pcs.lib.pacemaker.state import ClusterState
+from pcs.lib.pacemaker.values import is_false
+from pcs.lib.resource_agent import _STONITH_ACTION_REPLACED_BY
+from pcs.lib.sbd import get_sbd_service_name
 
 def status_cmd(argv):
     if len(argv) == 0:
@@ -88,8 +91,7 @@ def full_status():
         cluster_name = utils.getClusterName()
         print("Cluster name: %s" % cluster_name)
 
-    if utils.stonithCheck():
-        print("WARNING: no stonith devices and stonith-enabled is not false")
+    status_stonith_check()
 
     if (
         not utils.usefile
@@ -108,6 +110,67 @@ def full_status():
             print()
         utils.serviceStatus("  ")
 
+def status_stonith_check():
+    # We should read the default value from pacemaker. However that may slow
+    # pcs down as we need to run 'pengine metadata' to get it.
+    stonith_enabled = True
+    stonith_devices = []
+    stonith_devices_id_action = []
+    sbd_running = False
+
+    cib = utils.get_cib_dom()
+    for conf in cib.getElementsByTagName("configuration"):
+        for crm_config in conf.getElementsByTagName("crm_config"):
+            for nvpair in crm_config.getElementsByTagName("nvpair"):
+                if (
+                    nvpair.getAttribute("name") == "stonith-enabled"
+                    and
+                    is_false(nvpair.getAttribute("value"))
+                ):
+                    stonith_enabled = False
+                    break
+            if not stonith_enabled:
+                break
+        for resource in conf.getElementsByTagName("primitive"):
+            if resource.getAttribute("class") == "stonith":
+                stonith_devices.append(resource)
+                for attribs in resource.getElementsByTagName(
+                    "instance_attributes"
+                ):
+                    for nvpair in attribs.getElementsByTagName("nvpair"):
+                        if (
+                            nvpair.getAttribute("name") == "action"
+                            and
+                            nvpair.getAttribute("value")
+                        ):
+                            stonith_devices_id_action.append(
+                                resource.getAttribute("id")
+                            )
+
+    if not utils.usefile:
+        # check if SBD daemon is running
+        try:
+            sbd_running = utils.is_service_running(
+                utils.cmd_runner(),
+                get_sbd_service_name()
+            )
+        except LibraryError:
+            pass
+
+    if stonith_enabled and not stonith_devices and not sbd_running:
+        print("WARNING: no stonith devices and stonith-enabled is not false")
+
+    if stonith_devices_id_action:
+        print(
+            "WARNING: following stonith devices have the 'action' attribute"
+            " set, it is recommended to set {0} instead: {1}".format(
+                ", ".join(
+                    ["'{0}'".format(x) for x in _STONITH_ACTION_REPLACED_BY]
+                ),
+                ", ".join(sorted(stonith_devices_id_action))
+            )
+        )
+
 # Parse crm_mon for status
 def nodes_status(argv):
     if len(argv) == 1 and argv[0] == "pacemaker-id":
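The status_stonith_check() addition above warns when a stonith device still carries the deprecated 'action' attribute and recommends the per-action pcmk_off_action / pcmk_reboot_action attributes instead. For example, a device configured the recommended way (agent and device name chosen only for illustration) could be created as:

    pcs stonith create S fence_xvm pcmk_reboot_action=reboot pcmk_off_action=off
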
diff --git a/pcs/test/cib_resource/test_bundle.py b/pcs/test/cib_resource/test_bundle.py
index d8c97c6..50ea1df 100644
--- a/pcs/test/cib_resource/test_bundle.py
+++ b/pcs/test/cib_resource/test_bundle.py
@@ -41,7 +41,7 @@ class BundleCreateUpgradeCib(BundleCreateCommon):
 
     def test_success(self):
         self.assert_effect(
-            "resource bundle create B1 container image=pcs:test",
+            "resource bundle create B1 container docker image=pcs:test",
             """
                 <resources>
                     <bundle id="B1">
@@ -59,7 +59,7 @@ class BundleCreate(BundleCreateCommon):
 
     def test_minimal(self):
         self.assert_effect(
-            "resource bundle create B1 container image=pcs:test",
+            "resource bundle create B1 container docker image=pcs:test",
             """
                 <resources>
                     <bundle id="B1">
@@ -73,8 +73,10 @@ class BundleCreate(BundleCreateCommon):
         self.assert_effect(
             """
                 resource bundle create B1
-                container replicas=4 replicas-per-host=2 run-command=/bin/true
+                container docker replicas=4 replicas-per-host=2
+                    run-command=/bin/true
                 port-map port=1001
+                meta target-role=Stopped
                 network control-port=12345 host-interface=eth0 host-netmask=24
                 port-map id=B1-port-map-1001 internal-port=2002 port=2000
                 port-map range=3000-3300
@@ -83,6 +85,7 @@ class BundleCreate(BundleCreateCommon):
                 storage-map id=B1-storage-map source-dir=/tmp/docker2a
                     target-dir=/tmp/docker2b
                 container image=pcs:test masters=0
+                meta is-managed=false
                 storage-map source-dir-root=/tmp/docker3a
                     target-dir=/tmp/docker3b
                 storage-map id=B1-port-map-1001-1 source-dir-root=/tmp/docker4a
@@ -140,6 +143,18 @@ class BundleCreate(BundleCreateCommon):
                                 target-dir="/tmp/docker4b"
                             />
                         </storage>
+                        <meta_attributes id="B1-meta_attributes">
+                            <nvpair
+                                id="B1-meta_attributes-is-managed"
+                                name="is-managed"
+                                value="false"
+                            />
+                            <nvpair
+                                id="B1-meta_attributes-target-role"
+                                name="target-role"
+                                value="Stopped"
+                            />
+                        </meta_attributes>
                     </bundle>
                 </resources>
             """
@@ -157,15 +172,24 @@ class BundleCreate(BundleCreateCommon):
             stdout_start="\nUsage: pcs resource bundle create...\n"
         )
 
-    def test_fail_when_missing_required(self):
+    def test_fail_when_missing_container_type(self):
         self.assert_pcs_fail_regardless_of_force(
             "resource bundle create B1",
+            "Error: '' is not a valid container type value, use docker\n"
+        )
+
+    def test_fail_when_missing_required(self):
+        self.assert_pcs_fail_regardless_of_force(
+            "resource bundle create B1 container docker",
             "Error: required container option 'image' is missing\n"
         )
 
     def test_fail_on_unknown_option(self):
         self.assert_pcs_fail(
-            "resource bundle create B1 container image=pcs:test extra=option",
+            """
+                resource bundle create B1 container docker image=pcs:test
+                extra=option
+            """,
             "Error: invalid container option 'extra', allowed options are: "
                 "image, masters, network, options, replicas, replicas-per-host,"
                 " run-command, use --force to override\n"
@@ -178,8 +202,8 @@ class BundleCreate(BundleCreateCommon):
         # supported by pacemaker and so the command fails.
         self.assert_pcs_fail(
             """
-                resource bundle create B1 container image=pcs:test extra=option
-                --force
+                resource bundle create B1 container docker image=pcs:test
+                extra=option --force
             """
             ,
             stdout_start="Error: Unable to update cib\n"
@@ -187,7 +211,7 @@ class BundleCreate(BundleCreateCommon):
 
     def test_more_errors(self):
         self.assert_pcs_fail_regardless_of_force(
-            "resource bundle create B#1 container replicas=x",
+            "resource bundle create B#1 container docker replicas=x",
             outdent(
                 """\
                 Error: invalid bundle name 'B#1', '#' is not a valid character for a bundle name
@@ -215,6 +239,9 @@ class BundleCreate(BundleCreateCommon):
     def test_empty_port_map(self):
         self.assert_no_options("port-map")
 
+    def test_empty_meta(self):
+        self.assert_no_options("meta")
+
 
 @skip_unless_pacemaker_supports_bundle
 class BundleUpdate(BundleCreateCommon):
@@ -222,24 +249,25 @@ class BundleUpdate(BundleCreateCommon):
 
     def fixture_bundle(self, name):
         self.assert_pcs_success(
-            "resource bundle create {0} container image=pcs:test".format(
+            "resource bundle create {0} container docker image=pcs:test".format(
                 name
             )
         )
 
     def fixture_bundle_complex(self, name):
         self.assert_pcs_success(
-            (
-                "resource bundle create {0} "
-                "container image=pcs:test replicas=4 masters=2 "
-                "network control-port=12345 host-interface=eth0 host-netmask=24 "
-                "port-map internal-port=1000 port=2000 "
-                "port-map internal-port=1001 port=2001 "
-                "port-map internal-port=1002 port=2002 "
-                "storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b "
-                "storage-map source-dir=/tmp/docker2a target-dir=/tmp/docker2b "
-                "storage-map source-dir=/tmp/docker3a target-dir=/tmp/docker3b "
-            ).format(name)
+            ("""
+                resource bundle create {0}
+                container docker image=pcs:test replicas=4 masters=2
+                network control-port=12345 host-interface=eth0 host-netmask=24
+                port-map internal-port=1000 port=2000
+                port-map internal-port=1001 port=2001
+                port-map internal-port=1002 port=2002
+                storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b
+                storage-map source-dir=/tmp/docker2a target-dir=/tmp/docker2b
+                storage-map source-dir=/tmp/docker3a target-dir=/tmp/docker3b
+                meta priority=15 resource-stickiness=100 is-managed=false
+            """).format(name)
         )
 
     def test_fail_when_missing_args_1(self):
@@ -282,6 +310,7 @@ class BundleUpdate(BundleCreateCommon):
                 port-map add internal-port=1003 port=2003
                 storage-map remove B-storage-map B-storage-map-2
                 storage-map add source-dir=/tmp/docker4a target-dir=/tmp/docker4b
+                meta priority=10 is-managed= target-role=Stopped
             """,
             """
                 <resources>
@@ -319,6 +348,14 @@ class BundleUpdate(BundleCreateCommon):
                                 target-dir="/tmp/docker4b"
                             />
                         </storage>
+                        <meta_attributes id="B-meta_attributes">
+                            <nvpair id="B-meta_attributes-priority"
+                                name="priority" value="10" />
+                            <nvpair id="B-meta_attributes-resource-stickiness"
+                                name="resource-stickiness" value="100" />
+                            <nvpair id="B-meta_attributes-target-role"
+                                name="target-role" value="Stopped" />
+                        </meta_attributes>
                     </bundle>
                 </resources>
             """
@@ -373,6 +410,9 @@ class BundleUpdate(BundleCreateCommon):
     def test_empty_port_map(self):
         self.assert_no_options("port-map")
 
+    def test_empty_meta(self):
+        self.assert_no_options("meta")
+
 
 @skip_unless_pacemaker_supports_bundle
 class BundleShow(TestCase, AssertPcsMixin):
@@ -385,7 +425,7 @@ class BundleShow(TestCase, AssertPcsMixin):
 
     def test_minimal(self):
         self.assert_pcs_success(
-            "resource bundle create B1 container image=pcs:test"
+            "resource bundle create B1 container docker image=pcs:test"
         )
         self.assert_pcs_success("resource show B1", outdent(
             """\
@@ -398,7 +438,8 @@ class BundleShow(TestCase, AssertPcsMixin):
         self.assert_pcs_success(
             """
                 resource bundle create B1
-                container image=pcs:test masters=2 replicas=4 options='a b c'
+                container docker image=pcs:test masters=2 replicas=4
+                    options='a b c'
             """
         )
         self.assert_pcs_success("resource show B1", outdent(
@@ -412,7 +453,7 @@ class BundleShow(TestCase, AssertPcsMixin):
         self.assert_pcs_success(
             """
                 resource bundle create B1
-                container image=pcs:test
+                container docker image=pcs:test
                 network host-interface=eth0 host-netmask=24 control-port=12345
             """
         )
@@ -428,7 +469,7 @@ class BundleShow(TestCase, AssertPcsMixin):
         self.assert_pcs_success(
             """
                 resource bundle create B1
-                container image=pcs:test
+                container docker image=pcs:test
                 port-map id=B1-port-map-1001 internal-port=2002 port=2000
                 port-map range=3000-3300
             """
@@ -447,7 +488,7 @@ class BundleShow(TestCase, AssertPcsMixin):
         self.assert_pcs_success(
             """
                 resource bundle create B1
-                container image=pcs:test
+                container docker image=pcs:test
                 storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b
                 storage-map id=my-storage-map source-dir=/tmp/docker2a
                     target-dir=/tmp/docker2b
@@ -463,20 +504,56 @@ class BundleShow(TestCase, AssertPcsMixin):
             """
         ))
 
+    def test_meta(self):
+        self.assert_pcs_success("""
+            resource bundle create B1 container docker image=pcs:test
+            --disabled
+        """)
+        self.assert_pcs_success("resource show B1", outdent(
+            # pylint:disable=trailing-whitespace
+            """\
+             Bundle: B1
+              Docker: image=pcs:test
+              Meta Attrs: target-role=Stopped 
+            """
+        ))
+
+    def test_resource(self):
+        self.assert_pcs_success(
+            "resource bundle create B1 container docker image=pcs:test"
+        )
+        self.assert_pcs_success(
+            "resource create A ocf:pacemaker:Dummy bundle B1 --no-default-ops"
+        )
+        self.assert_pcs_success("resource show B1", outdent(
+            """\
+             Bundle: B1
+              Docker: image=pcs:test
+              Resource: A (class=ocf provider=pacemaker type=Dummy)
+               Operations: monitor interval=10 timeout=20 (A-monitor-interval-10)
+            """
+        ))
+
     def test_all(self):
         self.assert_pcs_success(
             """
                 resource bundle create B1
-                container image=pcs:test masters=2 replicas=4 options='a b c'
+                container docker image=pcs:test masters=2 replicas=4
+                    options='a b c'
                 network host-interface=eth0 host-netmask=24 control-port=12345
                 port-map id=B1-port-map-1001 internal-port=2002 port=2000
                 port-map range=3000-3300
                 storage-map source-dir=/tmp/docker1a target-dir=/tmp/docker1b
                 storage-map id=my-storage-map source-dir=/tmp/docker2a
                     target-dir=/tmp/docker2b
+                meta target-role=Stopped is-managed=false
             """
         )
+        self.assert_pcs_success(
+            "resource create A ocf:pacemaker:Dummy bundle B1 --no-default-ops"
+        )
         self.assert_pcs_success("resource show B1", outdent(
+            # pylint:disable=trailing-whitespace
             """\
              Bundle: B1
               Docker: image=pcs:test masters=2 options="a b c" replicas=4
@@ -487,5 +564,8 @@ class BundleShow(TestCase, AssertPcsMixin):
               Storage Mapping:
                source-dir=/tmp/docker1a target-dir=/tmp/docker1b (B1-storage-map)
                source-dir=/tmp/docker2a target-dir=/tmp/docker2b (my-storage-map)
+              Meta Attrs: is-managed=false target-role=Stopped 
+              Resource: A (class=ocf provider=pacemaker type=Dummy)
+               Operations: monitor interval=10 timeout=20 (A-monitor-interval-10)
             """
         ))
diff --git a/pcs/test/cib_resource/test_create.py b/pcs/test/cib_resource/test_create.py
index 2adef5a..a5f9e0f 100644
--- a/pcs/test/cib_resource/test_create.py
+++ b/pcs/test/cib_resource/test_create.py
@@ -475,6 +475,20 @@ class SuccessOperations(ResourceTest):
                 " stop, validate-all\n"
         )
 
+    def test_op_id(self):
+        self.assert_effect(
+            "resource create --no-default-ops R ocf:heartbeat:Dummy"
+                " op monitor interval=30s id=abcd"
+            ,
+            """<resources>
+                <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+                    <operations>
+                        <op id="abcd" interval="30s" name="monitor" />
+                    </operations>
+                </primitive>
+            </resources>"""
+        )
+
 class SuccessGroup(ResourceTest):
     def test_with_group(self):
         self.assert_effect(
@@ -888,7 +902,7 @@ class Bundle(ResourceTest):
 
     def fixture_bundle(self, name):
         self.assert_pcs_success(
-            "resource bundle create {0} container image=pcs:test".format(
+            "resource bundle create {0} container docker image=pcs:test".format(
                 name
             )
         )
@@ -1114,20 +1128,49 @@ class FailOrWarn(ResourceTest):
                 " 'IPaddr2')\n"
         )
 
-    def test_fail_on_invalid_id(self):
+    def test_fail_on_invalid_resource_id(self):
         self.assert_pcs_fail(
             "resource create #R ocf:heartbeat:Dummy",
             "Error: invalid resource name '#R',"
                 " '#' is not a valid first character for a resource name\n"
         )
 
-    def test_fail_on_existing_id(self):
+    def test_fail_on_existing_resource_id(self):
         self.assert_pcs_success("resource create R ocf:heartbeat:Dummy")
         self.assert_pcs_fail(
             "resource create R ocf:heartbeat:Dummy",
             "Error: 'R' already exists\n"
         )
 
+    def test_fail_on_invalid_operation_id(self):
+        self.assert_pcs_fail(
+            "resource create R ocf:heartbeat:Dummy op monitor interval=30 id=#O",
+            "Error: invalid operation id '#O',"
+                " '#' is not a valid first character for a operation id\n"
+        )
+
+    def test_fail_on_existing_operation_id(self):
+        self.assert_pcs_success("resource create R ocf:heartbeat:Dummy")
+        self.assert_pcs_fail(
+            "resource create S ocf:heartbeat:Dummy op monitor interval=30 id=R",
+            "Error: 'R' already exists\n"
+        )
+
+    def test_fail_on_duplicate_operation_id(self):
+        self.assert_pcs_fail(
+            "resource create R ocf:heartbeat:Dummy"
+                " op monitor interval=30 id=O"
+                " op monitor interval=60 id=O"
+            ,
+            "Error: 'O' already exists\n"
+        )
+
+    def test_fail_on_resource_id_same_as_operation_id(self):
+        self.assert_pcs_fail(
+            "resource create R ocf:heartbeat:Dummy op monitor interval=30 id=R",
+            "Error: 'R' already exists\n"
+        )
+
     def test_fail_on_unknown_operation(self):
         self.assert_pcs_fail(
             "resource create R ocf:heartbeat:Dummy op monitro interval=100",
diff --git a/pcs/test/cib_resource/test_manage_unmanage.py b/pcs/test/cib_resource/test_manage_unmanage.py
index 5b78646..2a87cd3 100644
--- a/pcs/test/cib_resource/test_manage_unmanage.py
+++ b/pcs/test/cib_resource/test_manage_unmanage.py
@@ -18,6 +18,7 @@ class ManageUnmanage(
     TestCase,
     get_assert_pcs_effect_mixin(
         lambda cib: etree.tostring(
+            # pylint:disable=undefined-variable
             etree.parse(cib).findall(".//resources")[0]
         )
     )
@@ -234,7 +235,7 @@ class ManageUnmanage(
 
         self.assert_pcs_fail(
             "resource unmanage A B",
-            "Error: resource/clone/master/group 'B' does not exist\n"
+            "Error: resource/clone/master/group/bundle 'B' does not exist\n"
         )
         self.assert_resources_xml_in_cib(
             """
@@ -255,7 +256,7 @@ class ManageUnmanage(
 
         self.assert_pcs_fail(
             "resource manage A B",
-            "Error: resource/clone/master/group 'B' does not exist\n"
+            "Error: resource/clone/master/group/bundle 'B' does not exist\n"
         )
         self.assert_resources_xml_in_cib(
             """
diff --git a/pcs/test/cib_resource/test_operation_add.py b/pcs/test/cib_resource/test_operation_add.py
index a842643..7b43754 100644
--- a/pcs/test/cib_resource/test_operation_add.py
+++ b/pcs/test/cib_resource/test_operation_add.py
@@ -14,11 +14,13 @@ from pcs.test.tools.pcs_runner import PcsRunner
 from pcs.test.tools.pcs_unittest import TestCase
 
 
-class Success(
+class OperationAdd(
     TestCase,
     get_assert_pcs_effect_mixin(get_cib_resources)
 ):
     temp_cib = rc("temp-cib.xml")
+    empty_cib = rc("cib-empty.xml")
+
     def setUp(self):
         self.prepare_cib_file()
         self.pcs_runner = PcsRunner(self.temp_cib)
@@ -33,7 +35,7 @@ class Success(
         return self.__class__.cib_cache
 
     def fixture_cib(self):
-        shutil.copy(rc('cib-empty-1.2.xml'), self.temp_cib)
+        shutil.copy(self.empty_cib, self.temp_cib)
         self.pcs_runner = PcsRunner(self.temp_cib)
         self.assert_pcs_success(
             "resource create --no-default-ops R ocf:heartbeat:Dummy"
@@ -50,7 +52,7 @@ class Success(
 
         #clean
         self.pcs_runner = None
-        shutil.copy(rc('cib-empty-1.2.xml'), self.temp_cib)
+        shutil.copy(self.empty_cib, self.temp_cib)
 
         return cib_content
 
@@ -133,3 +135,31 @@ class Success(
                 </primitive>
             </resources>"""
         )
+
+    def test_id_specified(self):
+        self.assert_effect(
+            "resource op add R start timeout=30 id=abcd",
+            """<resources>
+                <primitive class="ocf" id="R" provider="heartbeat" type="Dummy">
+                    <operations>
+                        <op id="R-monitor-interval-10" interval="10"
+                            name="monitor" timeout="20"
+                        />
+                        <op id="abcd" interval="0s" name="start" timeout="30" />
+                    </operations>
+                </primitive>
+            </resources>"""
+        )
+
+    def test_invalid_id(self):
+        self.assert_pcs_fail_regardless_of_force(
+            "resource op add R start timeout=30 id=ab#cd",
+            "Error: invalid operation id 'ab#cd', '#' is not a valid"
+                " character for a operation id\n"
+        )
+
+    def test_duplicate_id(self):
+        self.assert_pcs_fail_regardless_of_force(
+            "resource op add R start timeout=30 id=R",
+            "Error: id 'R' is already in use, please specify another one\n"
+        )
diff --git a/pcs/test/cib_resource/test_stonith_create.py b/pcs/test/cib_resource/test_stonith_create.py
index 8993d8d..ed3afc6 100644
--- a/pcs/test/cib_resource/test_stonith_create.py
+++ b/pcs/test/cib_resource/test_stonith_create.py
@@ -104,6 +104,65 @@ class PlainStonith(ResourceTest):
             </resources>"""
         )
 
+    def test_debug_and_verbose_allowed(self):
+        self.assert_effect(
+            "stonith create S fence_apc login=l ipaddr=i verbose=v debug=d",
+            """<resources>
+                <primitive class="stonith" id="S" type="fence_apc">
+                    <instance_attributes id="S-instance_attributes">
+                        <nvpair id="S-instance_attributes-debug"
+                            name="debug" value="d"
+                        />
+                        <nvpair id="S-instance_attributes-ipaddr"
+                            name="ipaddr" value="i"
+                        />
+                        <nvpair id="S-instance_attributes-login"
+                            name="login" value="l"
+                        />
+                        <nvpair id="S-instance_attributes-verbose"
+                            name="verbose" value="v"
+                        />
+                    </instance_attributes>
+                    <operations>
+                        <op id="S-monitor-interval-60s" interval="60s"
+                            name="monitor"
+                        />
+                    </operations>
+                </primitive>
+            </resources>"""
+        )
+
+    @need_load_xvm_fence_agent
+    def test_error_when_action_specified(self):
+        self.assert_pcs_fail(
+            "stonith create S fence_xvm action=reboot",
+            "Error: stonith option 'action' is deprecated and should not be"
+                " used, use pcmk_off_action, pcmk_reboot_action instead, use"
+                " --force to override\n"
+        )
+
+    @need_load_xvm_fence_agent
+    def test_warn_when_action_specified_forced(self):
+        self.assert_effect(
+            "stonith create S fence_xvm action=reboot --force",
+            """<resources>
+                <primitive class="stonith" id="S" type="fence_xvm">
+                    <instance_attributes id="S-instance_attributes">
+                        <nvpair id="S-instance_attributes-action"
+                            name="action" value="reboot"
+                        />
+                    </instance_attributes>
+                    <operations>
+                        <op id="S-monitor-interval-60s" interval="60s"
+                            name="monitor"
+                        />
+                    </operations>
+                </primitive>
+            </resources>""",
+            "Warning: stonith option 'action' is deprecated and should not be"
+                " used, use pcmk_off_action, pcmk_reboot_action instead\n"
+        )
+
 
 class WithMeta(ResourceTest):
     @need_load_xvm_fence_agent
diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
index 2b7fd5a..76930c7 100644
--- a/pcs/test/test_cluster.py
+++ b/pcs/test/test_cluster.py
@@ -233,6 +233,7 @@ Warning: Unable to resolve hostname: nonexistant-address.invalid
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -291,6 +292,7 @@ Error: {0} already exists, use --force to overwrite
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -417,6 +419,110 @@ Error: {0} already exists, use --force to overwrite
 </cluster>
 """)
 
+    def test_cluster_setup_encryption_enabled(self):
+        if utils.is_rhel6():
+            return
+
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --encryption=1"
+            .format(corosync_conf_tmp)
+        )
+        self.assertEqual("", output)
+        self.assertEqual(0, returnVal)
+        with open(corosync_conf_tmp) as f:
+            data = f.read()
+            ac(data, """\
+totem {
+    version: 2
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1.localhost
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2.localhost
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
+
+    def test_cluster_setup_encryption_disabled(self):
+        if utils.is_rhel6():
+            return
+
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --encryption=0"
+            .format(corosync_conf_tmp)
+        )
+        self.assertEqual("", output)
+        self.assertEqual(0, returnVal)
+        with open(corosync_conf_tmp) as f:
+            data = f.read()
+            ac(data, """\
+totem {
+    version: 2
+    cluster_name: cname
+    secauth: off
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1.localhost
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2.localhost
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
+
+    def test_cluster_setup_encryption_bad_value(self):
+        if utils.is_rhel6():
+            return
+
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --encryption=bad"
+            .format(corosync_conf_tmp)
+        )
+        self.assertEqual(
+            "Error: 'bad' is not a valid --encryption value, use 0, 1\n",
+            output
+        )
+        self.assertEqual(1, returnVal)
+
     def test_cluster_setup_2_nodes_no_atb(self):
         # Setup a 2 node cluster and make sure the two node config is set, then
         # add a node and make sure that it's unset, then remove a node and make
@@ -437,6 +543,7 @@ Error: {0} already exists, use --force to overwrite
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -477,6 +584,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -521,6 +629,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -561,6 +670,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -606,6 +716,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -647,6 +758,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -688,6 +800,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -728,6 +841,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -773,6 +887,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -818,6 +933,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
 }
 
@@ -867,6 +983,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udp
 }
 
@@ -1267,6 +1384,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
     ip_version: ipv6
 }
@@ -1374,6 +1492,7 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udp
     rrp_mode: passive
 
@@ -1432,6 +1551,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udp
     rrp_mode: passive
 
@@ -1490,6 +1610,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udp
     rrp_mode: passive
 
@@ -1548,6 +1669,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udp
     rrp_mode: passive
 
@@ -1615,6 +1737,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udp
     rrp_mode: active
 
@@ -1680,6 +1803,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udp
     rrp_mode: active
 
@@ -1755,6 +1879,7 @@ logging {
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: udpu
     rrp_mode: passive
 }
@@ -1843,6 +1968,7 @@ logging {
 totem {
     version: 2
     cluster_name: test99
+    secauth: off
     transport: udpu
 }
 
@@ -2427,6 +2553,7 @@ Warning: --last_man_standing_window ignored as it is not supported on CMAN clust
 totem {
     version: 2
     cluster_name: test99
+    secauth: off
     transport: udpu
     token: 20000
     token_coefficient: 20005
@@ -2670,6 +2797,7 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
 totem {
     version: 2
     cluster_name: cname
+    secauth: off
     transport: unknown
 }
 
diff --git a/pcs/test/test_cluster_pcmk_remote.py b/pcs/test/test_cluster_pcmk_remote.py
index 5dc1633..0db4a5c 100644
--- a/pcs/test/test_cluster_pcmk_remote.py
+++ b/pcs/test/test_cluster_pcmk_remote.py
@@ -399,11 +399,11 @@ class NodeRemoveRemote(ResourceTest):
         self.assert_effect(
             "cluster node remove-remote NODE-HOST",
             "<resources/>",
-            outdent(
+            fixture_nolive_remove_report(["NODE-HOST"]) + outdent(
                 """\
                 Deleting Resource - NODE-NAME
                 """
-            ) + fixture_nolive_remove_report(["NODE-HOST"])
+            )
         )
 
     def test_success_remove_by_node_name(self):
@@ -411,11 +411,11 @@ class NodeRemoveRemote(ResourceTest):
         self.assert_effect(
             "cluster node remove-remote NODE-NAME",
             "<resources/>",
-            outdent(
+            fixture_nolive_remove_report(["NODE-HOST"]) + outdent(
                 """\
                 Deleting Resource - NODE-NAME
                 """
-            ) + fixture_nolive_remove_report(["NODE-HOST"])
+            )
         )
 
     def test_refuse_on_duplicit(self):
@@ -431,13 +431,17 @@ class NodeRemoveRemote(ResourceTest):
         self.assert_effect(
             "cluster node remove-remote HOST-A --force",
             "<resources/>",
+
+            "Warning: multiple resource for 'HOST-A' found: 'HOST-A', 'NODE-NAME'\n"
+            +
+            fixture_nolive_remove_report(["HOST-A", "HOST-B"])
+            +
             outdent(
                 """\
-                Warning: multiple resource for 'HOST-A' found: 'HOST-A', 'NODE-NAME'
                 Deleting Resource - NODE-NAME
                 Deleting Resource - HOST-A
                 """
-            ) + fixture_nolive_remove_report(["HOST-A", "HOST-B"])
+            )
         )
 
 class NodeRemoveGuest(ResourceTest):
diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
index 69d955d..4160b01 100644
--- a/pcs/test/test_constraints.py
+++ b/pcs/test/test_constraints.py
@@ -3246,7 +3246,7 @@ class Bundle(ConstraintEffect):
 
     def fixture_bundle(self, name):
         self.assert_pcs_success(
-            "resource bundle create {0} container image=pcs:test".format(
+            "resource bundle create {0} container docker image=pcs:test".format(
                 name
             )
         )
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index 96eae8f..19c32ce 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -8,6 +8,7 @@ from __future__ import (
 from lxml import etree
 import re
 import shutil
+from textwrap import dedent
 
 from pcs.test.tools import pcs_unittest as unittest
 from pcs.test.tools.assertions import AssertPcsMixin
@@ -660,48 +661,90 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
             "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2"
                 " cidr_netmask=32 ip=192.168.0.99 op monitor interval=30s"
         )
+        self.assert_pcs_success("resource show ClusterIP", outdent(
+            """\
+             Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+              Attributes: cidr_netmask=32 ip=192.168.0.99
+              Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
+            """
+        ))
 
-        line = 'resource update ClusterIP op monitor interval=32s'
-        output, returnVal = pcs(temp_cib, line)
-        assert returnVal == 0
-        assert output == ""
+        self.assert_pcs_success(
+            "resource update ClusterIP op monitor interval=32s"
+        )
+        self.assert_pcs_success("resource show ClusterIP", outdent(
+            """\
+             Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+              Attributes: cidr_netmask=32 ip=192.168.0.99
+              Operations: monitor interval=32s (ClusterIP-monitor-interval-32s)
+            """
+        ))
 
-        line = 'resource update ClusterIP op monitor interval=33s start interval=30s timeout=180s'
-        output, returnVal = pcs(temp_cib, line)
-        assert returnVal == 0
-        assert output == ""
+        show_clusterip = outdent(
+            """\
+             Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+              Attributes: cidr_netmask=32 ip=192.168.0.99
+              Operations: monitor interval=33s (ClusterIP-monitor-interval-33s)
+                          start interval=30s timeout=180s (ClusterIP-start-interval-30s)
+            """
+        )
+        self.assert_pcs_success(
+            "resource update ClusterIP op monitor interval=33s start interval=30s timeout=180s"
+        )
+        self.assert_pcs_success("resource show ClusterIP", show_clusterip)
 
-        line = 'resource update ClusterIP op monitor interval=33s start interval=30s timeout=180s'
-        output, returnVal = pcs(temp_cib, line)
-        assert returnVal == 0
-        assert output == ""
+        self.assert_pcs_success(
+            "resource update ClusterIP op monitor interval=33s start interval=30s timeout=180s"
+        )
+        self.assert_pcs_success("resource show ClusterIP", show_clusterip)
 
-        line = 'resource update ClusterIP op'
-        output, returnVal = pcs(temp_cib, line)
-        assert returnVal == 0
-        assert output == ""
+        self.assert_pcs_success("resource update ClusterIP op")
+        self.assert_pcs_success("resource show ClusterIP", show_clusterip)
 
-        line = 'resource update ClusterIP op monitor'
-        output, returnVal = pcs(temp_cib, line)
-        assert returnVal == 0
-        assert output == ""
+        self.assert_pcs_success("resource update ClusterIP op monitor")
+        self.assert_pcs_success("resource show ClusterIP", show_clusterip)
+
+        # test invalid id
+        self.assert_pcs_fail_regardless_of_force(
+            "resource update ClusterIP op monitor interval=30 id=ab#cd",
+            "Error: invalid operation id 'ab#cd', '#' is not a valid character"
+                " for a operation id\n"
+        )
+        self.assert_pcs_success("resource show ClusterIP", show_clusterip)
 
+        # test existing id
+        self.assert_pcs_fail_regardless_of_force(
+            "resource update ClusterIP op monitor interval=30 id=ClusterIP",
+            "Error: id 'ClusterIP' is already in use, please specify another"
+                " one\n"
+        )
+        self.assert_pcs_success("resource show ClusterIP", show_clusterip)
+
+        # test id change
+        # there is a bug:
+        # - first an existing operation is removed
+        # - then a new operation is created at the same place
+        # - therefore options not specified in the command are removed
+        #    instead of being kept from the old operation
+        # This needs to be fixed. However, it is not my task currently.
+        # Moreover, it is documented behavior.
+        self.assert_pcs_success("resource update ClusterIP op monitor id=abcd")
         self.assert_pcs_success("resource show ClusterIP", outdent(
             """\
              Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
               Attributes: cidr_netmask=32 ip=192.168.0.99
-              Operations: monitor interval=33s (ClusterIP-monitor-interval-33s)
+              Operations: monitor interval=60s (abcd)
                           start interval=30s timeout=180s (ClusterIP-start-interval-30s)
             """
         ))
 
-        output, returnVal = pcs(
-            temp_cib,
+
+        # test two monitor operations:
+        # - the first one is updated
+        # - duplicate operation detection test
+        self.assert_pcs_success(
             "resource create A ocf:heartbeat:Dummy op monitor interval=10 op monitor interval=20"
         )
-        ac(output, "")
-        self.assertEqual(0, returnVal)
-
         self.assert_pcs_success("resource show A", outdent(
             """\
              Resource: A (class=ocf provider=heartbeat type=Dummy)
@@ -739,6 +782,8 @@ monitor interval=20 (A-monitor-interval-20)
             """
         ))
 
+
+
         output, returnVal = pcs(
             temp_cib,
             "resource create B ocf:heartbeat:Dummy --no-default-ops"
@@ -1799,13 +1844,18 @@ Ticket Constraints:
                 " allowed options are: fake, state, trace_file, trace_ra\n"
         )
 
-        output, returnVal = pcs(temp_cib, "resource update D0 test=testA test2=testB")
-        assert returnVal == 1
-        assert output == "Error: resource option(s): 'test, test2', are not recognized for resource type: 'ocf:heartbeat:Dummy' (use --force to override)\n", [output]
+        self.assert_pcs_fail(
+            "resource update D0 test=testA test2=testB",
+            "Error: invalid resource options: 'test', 'test2', allowed options"
+                " are: fake, state, trace_file, trace_ra, use --force to"
+                " override\n"
+        )
 
-        output, returnVal = pcs(temp_cib, "resource update --force D0 test=testB test2=testC test3=testD")
-        assert returnVal == 0
-        assert output == "", [output]
+        self.assert_pcs_success(
+            "resource update D0 test=testB test2=testC test3=testD --force",
+            "Warning: invalid resource options: 'test', 'test2', 'test3',"
+                " allowed options are: fake, state, trace_file, trace_ra\n"
+        )
 
         self.assert_pcs_success("resource show D0", outdent(
             # pylint:disable=trailing-whitespace
@@ -1816,7 +1866,6 @@ Ticket Constraints:
               Operations: monitor interval=35 (D0-monitor-interval-35)
             """
         ))
-        assert returnVal == 0
 
     def testMetaAttrs(self):
         # see also BundleMiscCommands
@@ -1837,9 +1886,11 @@ Ticket Constraints:
                 " options are: fake, state, trace_file, trace_ra\n"
         )
 
-        output, returnVal = pcs(temp_cib, "resource update --force D0 test=testC test2=test2a op monitor interval=35 meta test7=test7a test6=")
-        assert returnVal == 0
-        assert output == "", [output]
+        self.assert_pcs_success(
+            "resource update --force D0 test=testC test2=test2a op monitor interval=35 meta test7=test7a test6=",
+            "Warning: invalid resource options: 'test', 'test2', allowed"
+                " options are: fake, state, trace_file, trace_ra\n"
+        )
 
         output, returnVal = pcs(temp_cib, "resource meta D1 d1meta=superd1meta")
         assert returnVal == 0
@@ -2469,20 +2520,51 @@ Ticket Constraints:
         ))
 
     def testLSBResource(self):
-        output, returnVal  = pcs(
-            temp_cib,
-            "resource create --no-default-ops D2 lsb:network"
+        self.assert_pcs_fail(
+            "resource create --no-default-ops D2 lsb:network foo=bar",
+            "Error: invalid resource option 'foo', there are no options"
+                " allowed, use --force to override\n"
         )
-        assert returnVal == 0
-        assert output == "", [output]
 
-        output, returnval = pcs(temp_cib, "resource update D2 blah=blah")
-        assert returnval == 0
-        assert output == "", [output]
+        self.assert_pcs_success(
+            "resource create --no-default-ops D2 lsb:network foo=bar --force",
+            "Warning: invalid resource option 'foo', there are no options"
+                " allowed\n"
+        )
 
-        output, returnval = pcs(temp_cib, "resource update D2")
-        assert returnval == 0
-        assert output == "", [output]
+        self.assert_pcs_success(
+            "resource show --full",
+            outdent(
+                """\
+                 Resource: D2 (class=lsb type=network)
+                  Attributes: foo=bar
+                  Operations: monitor interval=15 timeout=15 (D2-monitor-interval-15)
+                """
+            )
+        )
+
+        self.assert_pcs_fail(
+            "resource update D2 bar=baz",
+            "Error: invalid resource option 'bar', there are no options"
+                " allowed, use --force to override\n"
+        )
+
+        self.assert_pcs_success(
+            "resource update D2 bar=baz --force",
+            "Warning: invalid resource option 'bar', there are no options"
+                " allowed\n"
+        )
+
+        self.assert_pcs_success(
+            "resource show --full",
+            outdent(
+                """\
+                 Resource: D2 (class=lsb type=network)
+                  Attributes: foo=bar bar=baz
+                  Operations: monitor interval=15 timeout=15 (D2-monitor-interval-15)
+                """
+            )
+        )
 
     def testResourceMoveBanClear(self):
         # Load nodes into cib so move will work
@@ -3321,11 +3403,11 @@ Error: Cannot remove more than one resource from cloned group
 
         # bad resource name
         o,r = pcs(temp_cib, "resource enable NoExist")
-        ac(o,"Error: resource/clone/master/group 'NoExist' does not exist\n")
+        ac(o,"Error: resource/clone/master/group/bundle 'NoExist' does not exist\n")
         assert r == 1
 
         o,r = pcs(temp_cib, "resource disable NoExist")
-        ac(o,"Error: resource/clone/master/group 'NoExist' does not exist\n")
+        ac(o,"Error: resource/clone/master/group/bundle 'NoExist' does not exist\n")
         assert r == 1
 
         # cloned group
@@ -3829,7 +3911,7 @@ Error: Cannot remove more than one resource from cloned group
 
         self.assert_pcs_fail_regardless_of_force(
             "resource enable dummy3 dummyX",
-            "Error: resource/clone/master/group 'dummyX' does not exist\n"
+            "Error: resource/clone/master/group/bundle 'dummyX' does not exist\n"
         )
         self.assert_pcs_success(
             "resource show --full",
@@ -3849,7 +3931,7 @@ Error: Cannot remove more than one resource from cloned group
 
         self.assert_pcs_fail_regardless_of_force(
             "resource disable dummy1 dummyX",
-            "Error: resource/clone/master/group 'dummyX' does not exist\n"
+            "Error: resource/clone/master/group/bundle 'dummyX' does not exist\n"
         )
         self.assert_pcs_success(
             "resource show --full",
@@ -4709,7 +4791,7 @@ class BundleCommon(
 
     def fixture_bundle(self, name):
         self.assert_pcs_success(
-            "resource bundle create {0} container image=pcs:test".format(
+            "resource bundle create {0} container docker image=pcs:test".format(
                 name
             )
         )
@@ -4719,7 +4801,11 @@ class BundleCommon(
 class BundleDeleteTest(BundleCommon):
     def test_without_primitive(self):
         self.fixture_bundle("B")
-        self.assert_effect("resource delete B", "<resources/>")
+        self.assert_effect(
+            "resource delete B",
+            "<resources/>",
+            "Deleting bundle 'B'\n"
+        )
 
     def test_with_primitive(self):
         self.fixture_bundle("B")
@@ -4727,7 +4813,10 @@ class BundleDeleteTest(BundleCommon):
         self.assert_effect(
             "resource delete B",
             "<resources/>",
-            "Deleting Resource - R\n",
+            dedent("""\
+                Deleting bundle 'B' and its inner resource 'R'
+                Deleting Resource - R
+            """),
         )
 
     def test_remove_primitive(self):
@@ -4823,30 +4912,26 @@ class BundleCloneMaster(BundleCommon):
 class BundleMiscCommands(BundleCommon):
     def test_resource_enable_bundle(self):
         self.fixture_bundle("B")
-        self.assert_pcs_fail_regardless_of_force(
-            "resource enable B",
-            "Error: 'B' is not clone/master/a group/primitive\n"
+        self.assert_pcs_success(
+            "resource enable B"
         )
 
     def test_resource_disable_bundle(self):
         self.fixture_bundle("B")
-        self.assert_pcs_fail_regardless_of_force(
-            "resource disable B",
-            "Error: 'B' is not clone/master/a group/primitive\n"
+        self.assert_pcs_success(
+            "resource disable B"
         )
 
     def test_resource_manage_bundle(self):
         self.fixture_bundle("B")
-        self.assert_pcs_fail_regardless_of_force(
-            "resource manage B",
-            "Error: 'B' is not clone/master/a group/primitive\n"
+        self.assert_pcs_success(
+            "resource manage B"
         )
 
     def test_resource_unmanage_bundle(self):
         self.fixture_bundle("B")
-        self.assert_pcs_fail_regardless_of_force(
-            "resource unmanage B",
-            "Error: 'B' is not clone/master/a group/primitive\n"
+        self.assert_pcs_success(
+            "resource unmanage B"
         )
 
     def test_op_add(self):
diff --git a/pcs/test/test_status.py b/pcs/test/test_status.py
new file mode 100644
index 0000000..09af303
--- /dev/null
+++ b/pcs/test/test_status.py
@@ -0,0 +1,89 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import shutil
+from textwrap import dedent
+
+from pcs.test.tools.assertions import AssertPcsMixin
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_runner import PcsRunner
+from pcs.test.tools.pcs_unittest import TestCase
+
+
+class StonithWarningTest(TestCase, AssertPcsMixin):
+    empty_cib = rc("cib-empty.xml")
+    temp_cib = rc("temp-cib.xml")
+
+    def setUp(self):
+        shutil.copy(self.empty_cib, self.temp_cib)
+        self.pcs_runner = PcsRunner(self.temp_cib)
+
+    def fixture_stonith(self, action=False):
+        self.assert_pcs_success(
+            "stonith create S fence_apc ipaddr=i login=l {0} --force".format(
+                "action=reboot" if action else ""
+            ),
+            "Warning: stonith option 'action' is deprecated and should not be"
+                " used, use pcmk_off_action, pcmk_reboot_action instead\n"
+            if action
+            else ""
+        )
+
+    def fixture_resource(self):
+        self.assert_pcs_success(
+            "resource create dummy ocf:pacemaker:Dummy action=reboot --force",
+            "Warning: invalid resource option 'action', allowed options are: "
+                "envfile, fail_start_on, fake, op_sleep, passwd, state,"
+                " trace_file, trace_ra\n"
+        )
+
+    def test_warning_stonith_action(self):
+        self.fixture_stonith(action=True)
+        self.assert_pcs_success(
+            "status",
+            stdout_start=dedent("""\
+                Cluster name: test99
+                WARNING: following stonith devices have the 'action' attribute set, it is recommended to set 'pcmk_off_action', 'pcmk_reboot_action' instead: S
+                Stack: unknown
+                Current DC: NONE
+            """)
+        )
+
+    def test_action_ignored_for_non_stonith_resources(self):
+        self.fixture_stonith(action=False)
+        self.fixture_resource()
+
+        self.assert_pcs_success(
+            "status",
+            stdout_start=dedent("""\
+                Cluster name: test99
+                Stack: unknown
+                Current DC: NONE
+            """)
+        )
+
+    def test_warn_when_no_stonith(self):
+        self.assert_pcs_success(
+            "status",
+            stdout_start=dedent("""\
+                Cluster name: test99
+                WARNING: no stonith devices and stonith-enabled is not false
+                Stack: unknown
+                Current DC: NONE
+            """)
+        )
+
+    def test_disabled_stonith_does_not_care_about_missing_devices(self):
+        self.assert_pcs_success("property set stonith-enabled=false")
+        self.assert_pcs_success(
+            "status",
+            stdout_start=dedent("""\
+                Cluster name: test99
+                Stack: unknown
+                Current DC: NONE
+            """)
+        )
diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
index f1c2d75..4a2073f 100644
--- a/pcs/test/test_stonith.py
+++ b/pcs/test/test_stonith.py
@@ -107,28 +107,28 @@ class StonithTest(TestCase, AssertPcsMixin):
 
         self.assert_pcs_fail(
             "stonith create test2 fence_apc",
-            "Error: required resource options 'ipaddr', 'login' are missing, use --force to override\n"
+            "Error: required stonith options 'ipaddr', 'login' are missing, use --force to override\n"
         )
 
         self.assert_pcs_success(
             "stonith create test2 fence_apc --force",
-            "Warning: required resource options 'ipaddr', 'login' are missing\n"
+            "Warning: required stonith options 'ipaddr', 'login' are missing\n"
         )
 
         self.assert_pcs_fail(
             "stonith create test3 fence_apc bad_argument=test",
-            stdout_start="Error: invalid resource option 'bad_argument',"
+            stdout_start="Error: invalid stonith option 'bad_argument',"
                 " allowed options are:"
         )
 
         self.assert_pcs_fail(
             "stonith create test9 fence_apc pcmk_status_action=xxx",
-            "Error: required resource options 'ipaddr', 'login' are missing, use --force to override\n"
+            "Error: required stonith options 'ipaddr', 'login' are missing, use --force to override\n"
         )
 
         self.assert_pcs_success(
              "stonith create test9 fence_apc pcmk_status_action=xxx --force",
-            "Warning: required resource options 'ipaddr', 'login' are missing\n"
+            "Warning: required stonith options 'ipaddr', 'login' are missing\n"
         )
 
         output, returnVal = pcs(temp_cib, "stonith show test9")
@@ -146,12 +146,12 @@ class StonithTest(TestCase, AssertPcsMixin):
 
         self.assert_pcs_fail(
             "stonith create test3 fence_ilo ipaddr=test",
-            "Error: required resource option 'login' is missing, use --force to override\n"
+            "Error: required stonith option 'login' is missing, use --force to override\n"
         )
 
         self.assert_pcs_success(
              "stonith create test3 fence_ilo ipaddr=test --force",
-            "Warning: required resource option 'login' is missing\n"
+            "Warning: required stonith option 'login' is missing\n"
         )
 
 # Testing that pcmk_host_check, pcmk_host_list & pcmk_host_map are allowed for
@@ -177,9 +177,13 @@ class StonithTest(TestCase, AssertPcsMixin):
             "Deleting Resource - apc-fencing\n"
         )
 
-        output, returnVal = pcs(temp_cib, "stonith update test3 bad_ipaddr=test")
-        assert returnVal == 1
-        assert output == "Error: resource option(s): 'bad_ipaddr', are not recognized for resource type: 'stonith::fence_ilo' (use --force to override)\n",[output]
+        self.assert_pcs_fail(
+            "stonith update test3 bad_ipaddr=test",
+            stdout_regexp=(
+                "^Error: invalid stonith option 'bad_ipaddr', allowed options"
+                " are: [^\n]+, use --force to override\n$"
+            )
+        )
 
         output, returnVal = pcs(temp_cib, "stonith update test3 login=testA")
         assert returnVal == 0
@@ -203,7 +207,7 @@ class StonithTest(TestCase, AssertPcsMixin):
 
         self.assert_pcs_success(
             "stonith create test-fencing fence_apc 'pcmk_host_list=rhel7-node1 rhel7-node2' op monitor interval=61s --force",
-            "Warning: required resource options 'ipaddr', 'login' are missing\n"
+            "Warning: required stonith options 'ipaddr', 'login' are missing\n"
         )
 
         self.assert_pcs_success("config show", outdent(
@@ -349,6 +353,103 @@ class StonithTest(TestCase, AssertPcsMixin):
 """)
         self.assertEqual(0, returnVal)
 
+    def test_stonith_create_action(self):
+        self.assert_pcs_fail(
+            "stonith create test fence_apc ipaddr=i login=l action=a",
+            "Error: stonith option 'action' is deprecated and should not be"
+                " used, use pcmk_off_action, pcmk_reboot_action instead,"
+                " use --force to override\n"
+        )
+
+        self.assert_pcs_success(
+            "stonith create test fence_apc ipaddr=i login=l action=a --force",
+            "Warning: stonith option 'action' is deprecated and should not be"
+                " used, use pcmk_off_action, pcmk_reboot_action instead\n"
+        )
+
+        self.assert_pcs_success(
+            "stonith show --full",
+            outdent(
+                """\
+                 Resource: test (class=stonith type=fence_apc)
+                  Attributes: action=a ipaddr=i login=l
+                  Operations: monitor interval=60s (test-monitor-interval-60s)
+                """
+            )
+        )
+
+    def test_stonith_create_action_empty(self):
+        self.assert_pcs_success(
+            "stonith create test fence_apc ipaddr=i login=l action="
+        )
+
+        self.assert_pcs_success(
+            "stonith show --full",
+            # TODO fix code and test - there should be no action in the attribs
+            outdent(
+                """\
+                 Resource: test (class=stonith type=fence_apc)
+                  Attributes: action= ipaddr=i login=l
+                  Operations: monitor interval=60s (test-monitor-interval-60s)
+                """
+            )
+        )
+
+    def test_stonith_update_action(self):
+        self.assert_pcs_success(
+            "stonith create test fence_apc ipaddr=i login=l"
+        )
+
+        self.assert_pcs_success(
+            "stonith show --full",
+            outdent(
+                """\
+                 Resource: test (class=stonith type=fence_apc)
+                  Attributes: ipaddr=i login=l
+                  Operations: monitor interval=60s (test-monitor-interval-60s)
+                """
+            )
+        )
+
+        self.assert_pcs_fail(
+            "stonith update test action=a",
+            "Error: stonith option 'action' is deprecated and should not be"
+                " used, use pcmk_off_action, pcmk_reboot_action instead,"
+                " use --force to override\n"
+        )
+
+        self.assert_pcs_success(
+            "stonith update test action=a --force",
+            "Warning: stonith option 'action' is deprecated and should not be"
+                " used, use pcmk_off_action, pcmk_reboot_action instead\n"
+        )
+
+        self.assert_pcs_success(
+            "stonith show --full",
+            outdent(
+                """\
+                 Resource: test (class=stonith type=fence_apc)
+                  Attributes: ipaddr=i login=l action=a
+                  Operations: monitor interval=60s (test-monitor-interval-60s)
+                """
+            )
+        )
+
+        self.assert_pcs_success(
+            "stonith update test action="
+        )
+
+        self.assert_pcs_success(
+            "stonith show --full",
+            outdent(
+                """\
+                 Resource: test (class=stonith type=fence_apc)
+                  Attributes: ipaddr=i login=l
+                  Operations: monitor interval=60s (test-monitor-interval-60s)
+                """
+            )
+        )
+
     def testStonithFenceConfirm(self):
         output, returnVal = pcs(temp_cib, "stonith fence blah blah")
         assert returnVal == 1
@@ -361,7 +462,7 @@ class StonithTest(TestCase, AssertPcsMixin):
     def testPcmkHostList(self):
         self.assert_pcs_success(
             "stonith create F1 fence_apc 'pcmk_host_list=nodea nodeb' --force",
-            "Warning: required resource options 'ipaddr', 'login' are missing\n"
+            "Warning: required stonith options 'ipaddr', 'login' are missing\n"
         )
 
         output, returnVal = pcs(temp_cib, "stonith show F1")
@@ -414,31 +515,31 @@ class StonithTest(TestCase, AssertPcsMixin):
 
         self.assert_pcs_success(
             "stonith create n1-ipmi fence_apc --force",
-            "Warning: required resource options 'ipaddr', 'login' are missing\n"
+            "Warning: required stonith options 'ipaddr', 'login' are missing\n"
         )
         self.assert_pcs_success(
             "stonith create n2-ipmi fence_apc --force",
-            "Warning: required resource options 'ipaddr', 'login' are missing\n"
+            "Warning: required stonith options 'ipaddr', 'login' are missing\n"
         )
         self.assert_pcs_success(
             "stonith create n1-apc1 fence_apc --force",
-            "Warning: required resource options 'ipaddr', 'login' are missing\n"
+            "Warning: required stonith options 'ipaddr', 'login' are missing\n"
         )
         self.assert_pcs_success(
             "stonith create n1-apc2 fence_apc --force",
-            "Warning: required resource options 'ipaddr', 'login' are missing\n"
+            "Warning: required stonith options 'ipaddr', 'login' are missing\n"
         )
         self.assert_pcs_success(
             "stonith create n2-apc1 fence_apc --force",
-            "Warning: required resource options 'ipaddr', 'login' are missing\n"
+            "Warning: required stonith options 'ipaddr', 'login' are missing\n"
         )
         self.assert_pcs_success(
             "stonith create n2-apc2 fence_apc --force",
-            "Warning: required resource options 'ipaddr', 'login' are missing\n"
+            "Warning: required stonith options 'ipaddr', 'login' are missing\n"
         )
         self.assert_pcs_success(
             "stonith create n2-apc3 fence_apc --force",
-            "Warning: required resource options 'ipaddr', 'login' are missing\n"
+            "Warning: required stonith options 'ipaddr', 'login' are missing\n"
         )
         self.assert_pcs_success_all([
             "stonith level add 1 rh7-1 n1-ipmi",
diff --git a/pcs/test/tools/assertions.py b/pcs/test/tools/assertions.py
index 97e2472..5177598 100644
--- a/pcs/test/tools/assertions.py
+++ b/pcs/test/tools/assertions.py
@@ -8,10 +8,14 @@ from __future__ import (
 import doctest
 from lxml.doctestcompare import LXMLOutputChecker
 from lxml.etree import LXML_VERSION
+import re
 
 from pcs.lib.errors import LibraryError
 from pcs.test.tools.misc import prepare_diff
 
+# cover python2 vs. python3 differences
+_re_object_type = type(re.compile(""))
+
 def start_tag_error_text():
     """lxml 3.7+ gives a longer 'start tag expected' error message,
     handle it here so multiple tests can just get the appropriate
@@ -41,37 +45,61 @@ class AssertPcsMixin(object):
                     ).format(command, pcs_returncode, stdout)
                 )
 
-    def assert_pcs_success(self, command, stdout_full=None, stdout_start=None):
+    def assert_pcs_success(
+        self, command, stdout_full=None, stdout_start=None, stdout_regexp=None
+    ):
         full = stdout_full
-        if stdout_start is None and stdout_full is None:
+        if (
+            stdout_start is None
+            and
+            stdout_full is None
+            and
+            stdout_regexp is None
+        ):
             full = ""
         self.assert_pcs_result(
             command,
             stdout_full=full,
-            stdout_start=stdout_start
+            stdout_start=stdout_start,
+            stdout_regexp=stdout_regexp,
+            returncode=0
         )
 
-    def assert_pcs_fail(self, command, stdout_full=None, stdout_start=None):
+    def assert_pcs_fail(
+        self, command, stdout_full=None, stdout_start=None, stdout_regexp=None
+    ):
         self.assert_pcs_result(
             command,
             stdout_full=stdout_full,
             stdout_start=stdout_start,
+            stdout_regexp=stdout_regexp,
             returncode=1
         )
 
     def assert_pcs_fail_regardless_of_force(
-        self, command, stdout_full=None, stdout_start=None
+        self, command, stdout_full=None, stdout_start=None, stdout_regexp=None
     ):
-        self.assert_pcs_fail(command, stdout_full, stdout_start)
-        self.assert_pcs_fail(command+" --force", stdout_full, stdout_start)
+        self.assert_pcs_fail(command, stdout_full, stdout_start, stdout_regexp)
+        self.assert_pcs_fail(
+            command + " --force", stdout_full, stdout_start, stdout_regexp
+        )
 
     def assert_pcs_result(
-        self, command, stdout_full=None, stdout_start=None, returncode=0
+        self, command, stdout_full=None, stdout_start=None, stdout_regexp=None,
+        returncode=0
     ):
-        msg = "Please specify exactly one: stdout_start or stdout_full"
-        if stdout_start is None and stdout_full is None:
+        msg = (
+            "Please specify exactly one: stdout_start or stdout_full or"
+            " stdout_regexp"
+        )
+        specified_stdout = [
+            stdout
+            for stdout in (stdout_full, stdout_start, stdout_regexp)
+            if stdout is not None
+        ]
+        if len(specified_stdout) < 1:
             raise Exception(msg + ", none specified")
-        if stdout_start is not None and stdout_full is not None:
+        elif len(specified_stdout) > 1:
             raise Exception(msg + ", both specified")
 
         stdout, pcs_returncode = self.pcs_runner.run(command)
@@ -91,8 +119,7 @@ class AssertPcsMixin(object):
         if stdout_start:
             expected_start = self.__prepare_output(stdout_start)
             if not stdout.startswith(expected_start):
-                self.assertTrue(
-                    False,
+                self.fail(
                     message_template.format(
                         reason="Stdout does not start as expected",
                         cmd=command,
@@ -102,6 +129,24 @@ class AssertPcsMixin(object):
                         stdout=stdout
                     )
                 )
+        elif stdout_regexp:
+            if not isinstance(stdout_regexp, _re_object_type):
+                stdout_regexp = re.compile(stdout_regexp)
+            if not stdout_regexp.search(stdout):
+                self.fail(
+                    (
+                        "Stdout does not match the expected regexp\n"
+                        "command: {cmd}\nregexp:\n{regexp} (flags: {flags})\n"
+                        "\nFull stdout:\n{stdout}"
+                    ).format(
+                        cmd=command,
+                        regexp=stdout_regexp.pattern,
+                        flags=", ".join(
+                            self.__prepare_regexp_flags(stdout_regexp.flags)
+                        ),
+                        stdout=stdout,
+                    )
+                )
         else:
             expected_full = self.__prepare_output(stdout_full)
             if stdout != expected_full:
@@ -121,6 +166,24 @@ class AssertPcsMixin(object):
             return console_report(*output)
         return output
 
+    def __prepare_regexp_flags(self, flags):
+        # python2 has different flags than python3
+        possible_flags = [
+            "ASCII",
+            "DEBUG",
+            "IGNORECASE",
+            "LOCALE",
+            "MULTILINE",
+            "DOTALL",
+            "UNICODE",
+            "VERBOSE",
+        ]
+        used_flags = [
+            f for f in possible_flags
+            if hasattr(re, f) and (flags & getattr(re, f))
+        ]
+        return sorted(used_flags)
+
 
 class ExtendedAssertionsMixin(object):
     def assert_raises(
@@ -250,4 +313,3 @@ def __report_item_equal(real_report_item, report_item_info):
             )
         )
     )
-
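
A minimal sketch of how a test can use the new stdout_regexp parameter of AssertPcsMixin; the test class name, CIB path and expected pattern below are illustrative placeholders only:

    import re

    from pcs.test.tools.assertions import AssertPcsMixin
    from pcs.test.tools.pcs_runner import PcsRunner
    from pcs.test.tools.pcs_unittest import TestCase

    class RegexpAssertionExample(TestCase, AssertPcsMixin):
        def setUp(self):
            # real tests copy a CIB fixture to a temporary file first
            self.pcs_runner = PcsRunner("temp-cib.xml")

        def test_invalid_stonith_option(self):
            # a pattern string or a pre-compiled regexp object is accepted
            self.assert_pcs_fail(
                "stonith update test3 bad_ipaddr=test",
                stdout_regexp=re.compile(
                    "^Error: invalid stonith option 'bad_ipaddr',"
                    " allowed options are: [^\n]+, use --force to override\n$"
                )
            )
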
diff --git a/pcs/usage.py b/pcs/usage.py
index c73a103..d047d55 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -213,7 +213,7 @@ Commands:
            <operation options>]...] [meta <meta options>...]
            [clone [<clone options>] | master [<master options>] |
            --group <group id> [--before <resource id> | --after <resource id>]
-           | bundle <bundle id>] [--disabled] [--wait[=n]]
+           | bundle <bundle id>] [--disabled] [--no-default-ops] [--wait[=n]]
         Create specified resource. If clone is used a clone resource is
         created. If master is specified a master/slave resource is created.
         If --group is specified the resource is added to the group named. You
@@ -221,10 +221,12 @@ Commands:
         resource relatively to some resource already existing in the group.
         If bundle is used, the resource will be created inside of the specified
         bundle. If --disabled is specified the resource is not started
-        automatically. If --wait is specified, pcs will wait up to 'n' seconds
-        for the resource to start and then return 0 if the resource is started,
-        or 1 if the resource has not yet started.  If 'n' is not specified it
-        defaults to 60 minutes.
+        automatically. If --no-default-ops is specified, only monitor
+        operations are created for the resource and all other operations use
+        default settings. If --wait is specified, pcs will wait up to 'n'
+        seconds for the resource to start and then return 0 if the resource is
+        started, or 1 if the resource has not yet started. If 'n' is not
+        specified it defaults to 60 minutes.
         Example: Create a new resource called 'VirtualIP' with IP address
             192.168.0.99, netmask of 32, monitored everything 30 seconds,
             on eth2:
@@ -367,7 +369,8 @@ Commands:
 
     op defaults [options]
         Set default values for operations, if no options are passed, lists
-        currently configured defaults.
+        currently configured defaults. Defaults do not apply to resources which
+        override them with their own defined operations.
 
     meta <resource id | group id | master id | clone id> <meta options>
          [--wait[=n]]
@@ -428,12 +431,13 @@ Commands:
         If 'n' is not specified it defaults to 60 minutes.
         Note: to remove a master you must remove the resource/group it contains.
 
-    bundle create <bundle id> [container [<container type>] <container options>]
+    bundle create <bundle id> container <container type> [<container options>]
             [network <network options>] [port-map <port options>]...
-            [storage-map <storage options>]... [--wait[=n]]
+            [storage-map <storage options>]... [meta <meta options>]
+            [--disabled] [--wait[=n]]
         Create a new bundle encapsulating no resources. The bundle can be used
         either as it is or a resource may be put into it at any time.
-        If the container type is not specified, it defaults to 'docker'.
+        If --disabled is specified, the bundle is not started automatically.
         If --wait is specified, pcs will wait up to 'n' seconds for the bundle
         to start and then return 0 on success or 1 on error. If 'n' is not
         specified it defaults to 60 minutes.
@@ -442,13 +446,14 @@ Commands:
             [network <network options>]
             [port-map (add <port options>) | (remove <id>...)]...
             [storage-map (add <storage options>) | (remove <id>...)]...
+            [meta <meta options>]
             [--wait[=n]]
         Add, remove or change options to specified bundle. If you wish to update
         a resource encapsulated in the bundle, use the 'pcs resource update'
-        command instead and specify the resource id.  If --wait is specified,
+        command instead and specify the resource id. If --wait is specified,
         pcs will wait up to 'n' seconds for the operation to finish (including
         moving resources if appropriate) and then return 0 on success or 1 on
-        error.  If 'n' is not specified it defaults to 60 minutes.
+        error. If 'n' is not specified it defaults to 60 minutes.
 
     manage <resource id>... [--monitor]
         Set resources listed to managed mode (default). If --monitor is
@@ -462,7 +467,8 @@ Commands:
 
     defaults [options]
         Set default values for resources, if no options are passed, lists
-        currently configured defaults.
+        currently configured defaults. Defaults do not apply to resources which
+        override them with their own defined values.
 
     cleanup [<resource id>] [--node <node>]
         Make the cluster forget the operation history of the resource and
@@ -576,6 +582,7 @@ Commands:
             [--ipv6] [--token <timeout>] [--token_coefficient <timeout>]
             [--join <timeout>] [--consensus <timeout>]
             [--miss_count_const <count>] [--fail_recv_const <failures>]
+            [--encryption 0|1]
         Configure corosync and sync configuration out to listed nodes.
         --local will only perform changes on the local node,
         --start will also start the cluster on the specified nodes,
@@ -611,6 +618,8 @@ Commands:
             without receiving any messages when messages should be received
             may occur before a new configuration is formed
             (default 2500 failures)
+        --encryption 0|1 disables (0) or enables (1) corosync communication
+            encryption (default 0)
 
         Configuring Redundant Ring Protocol (RRP)
 
diff --git a/pcs/utils.py b/pcs/utils.py
index 4753b87..081ee65 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -1950,41 +1950,6 @@ def getClusterStateXml():
         err("error running crm_mon, is pacemaker running?")
     return xml
 
-# Returns true if stonith-enabled is not false/off & no stonith devices exist
-# So if the cluster can't start due to missing stonith devices return true
-def stonithCheck():
-    et = get_cib_etree()
-    cps = et.find("configuration/crm_config/cluster_property_set")
-    if cps != None:
-        for prop in cps.findall(str("nvpair")):
-            if 'name' in prop.attrib and prop.attrib["name"] == "stonith-enabled":
-                if prop.attrib["value"] == "off" or \
-                        prop.attrib["value"] == "false":
-                    return False
-
-    xpath_list = (
-        "configuration/resources/primitive",
-        "configuration/resources/group/primitive",
-        "configuration/resources/clone/primitive",
-        "configuration/resources/clone/group/primitive",
-        "configuration/resources/master/primitive",
-        "configuration/resources/master/group/primitive",
-    )
-    for xpath in xpath_list:
-        for p in et.findall(str(xpath)):
-            if ("class" in p.attrib) and (p.attrib["class"] == "stonith"):
-                return False
-
-    if not usefile:
-        # check if SBD daemon is running
-        try:
-            if is_service_running(cmd_runner(), sbd.get_sbd_service_name()):
-                return False
-        except LibraryError:
-            pass
-
-    return True
-
 def getCorosyncNodesID(allow_failure=False):
     if os.getuid() == 0:
         if is_rhel6():
@@ -2870,6 +2835,7 @@ def get_modificators():
     return {
         "after": pcs_options.get("--after", None),
         "all": "--all" in pcs_options,
+        "async": "--async" in pcs_options,
         "autocorrect": "--autocorrect" in pcs_options,
         "autodelete": "--autodelete" in pcs_options,
         "before": pcs_options.get("--before", None),
@@ -2878,6 +2844,7 @@ def get_modificators():
         "device": pcs_options.get("--device", []),
         "disabled": "--disabled" in pcs_options,
         "enable": "--enable" in pcs_options,
+        "encryption": pcs_options.get("--encryption", "0"),
         "force": "--force" in pcs_options,
         "full": "--full" in pcs_options,
         "group": pcs_options.get("--group", None),
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index 78bd87f..1b129b2 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -47,7 +47,7 @@ def get_pcs_path()
   end
 end
 
-PCS_VERSION = '0.9.158'
+PCS_VERSION = '0.9.159'
 # unique instance signature, allows detection of daemon restarts
 DAEMON_INSTANCE_SIGNATURE = Digest::SHA2.hexdigest("#{Time.now} #{rand()}")
 COROSYNC = COROSYNC_BINARIES + "corosync"
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 930b4a0..9725c4b 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -61,15 +61,16 @@ def add_node_attr(auth_user, node, key, value)
 end
 
 def add_meta_attr(auth_user, resource, key, value)
-  # --force is a workaround for:
-  # 1) Error: this command is not sufficient for create guest node, use 'pcs
-  # cluster node add-guest', use --force to override
-  # 2) Error: this command is not sufficient for remove guest node, use 'pcs
-  # cluster node remove-guest', use --force to override
-  stdout, stderr, retval = run_cmd(
-    auth_user, PCS, "resource", "meta", resource, key.to_s + "=" + value.to_s,
-    "--force"
-  )
+  cmd = ["resource", "meta", resource, key.to_s + "=" + value.to_s]
+  if key.to_s == "remote-node"
+    # --force is a workaround for:
+    # 1) Error: this command is not sufficient for create guest node, use 'pcs
+    # cluster node add-guest', use --force to override
+    # 2) Error: this command is not sufficient for remove guest node, use 'pcs
+    # cluster node remove-guest', use --force to override
+    cmd << "--force"
+  end
+  stdout, stderr, retval = run_cmd(auth_user, PCS, *cmd)
   return retval
 end
 
@@ -1034,7 +1035,8 @@ def pcsd_restart()
   # request
   fork {
     # let us send the response to the restart request
-    sleep(3)
+    # we need a little bit more time to finish some things when setting up a cluster
+    sleep(10)
     if ISSYSTEMCTL
       exec("systemctl", "restart", "pcsd")
     else
@@ -1834,6 +1836,7 @@ def get_node_status(auth_user, cib_dom)
         'moving_resource_in_group',
         'unmanaged_resource',
         'alerts',
+        'hardened_cluster',
       ]
   }
 
diff --git a/pcsd/pcsd.8 b/pcsd/pcsd.8
index 2d8b8df..8a929df 100644
--- a/pcsd/pcsd.8
+++ b/pcsd/pcsd.8
@@ -1,4 +1,4 @@
-.TH PCSD "8" "May 2017" "pcs 0.9.158" "System Administration Utilities"
+.TH PCSD "8" "June 2017" "pcs 0.9.159" "System Administration Utilities"
 .SH NAME
 pcsd \- pacemaker/corosync configuration system daemon
 
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index 33d999d..1026a36 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -568,7 +568,8 @@ already been added to pcsd.  You may not add two clusters with the same name int
       {
         :clustername => @cluster_name,
         :nodes => @nodes_rrp.join(';'),
-        :options => options.to_json
+        :options => options.to_json,
+        :encryption => params[:encryption],
       },
       true,
       nil,
diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
index 69b6e53..d98e534 100644
--- a/pcsd/public/js/nodes-ember.js
+++ b/pcsd/public/js/nodes-ember.js
@@ -1228,7 +1228,8 @@ Pcs.ResourceAgentParameter = Ember.Object.extend({
     if (longdesc) desc.push(longdesc);
     if (def_val) desc.push("Default value: " + def_val);
     return desc.join("<br /><br />");
-  }.property("longdesc", "shortdesc", "default")
+  }.property("longdesc", "shortdesc", "default"),
+  pcs_deprecated_warning: ""
 });
 
 Pcs.ResourceAgent = Ember.Object.extend({
diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
index b7ad72f..4754139 100644
--- a/pcsd/public/js/pcsd.js
+++ b/pcsd/public/js/pcsd.js
@@ -1080,7 +1080,7 @@ function update_create_cluster_dialog(nodes, version_info) {
     ajax_wrapper({
       type: "POST",
       url: "/manage/newcluster",
-      timeout: pcs_timeout,
+      timeout: 60*1000,
       data: $('#create_new_cluster_form').serialize(),
       success: function(data) {
         if (data) {
@@ -1090,7 +1090,17 @@ function update_create_cluster_dialog(nodes, version_info) {
         Pcs.update();
       },
       error: function (xhr, status, error) {
-        alert(xhr.responseText);
+        var err_msg = "";
+        if ((status == "timeout") || ($.trim(error) == "timeout")) {
+          err_msg = (
+            "Operation takes longer to complete than expected. " +
+            "It may continue running in the background. Later, you can try " +
+            "to add this cluster as existing one."
+          );
+        } else {
+          err_msg = xhr.responseText;
+        }
+        alert(err_msg);
         $("#create_cluster_submit_btn").button("option", "disabled", false);
       }
     });
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index 005d45e..672b5e9 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -964,9 +964,12 @@ def setup_cluster(params, request, auth_user)
   end
   nodes_options = nodes + options
   nodes_options += options_udp if transport_udp
+  if ['0', '1'].include?(params[:encryption])
+    nodes_options << "--encryption=#{params[:encryption]}"
+  end
   stdout, stderr, retval = run_cmd(
-    auth_user, PCS, "cluster", "setup", "--enable", "--start",
-    "--name", params[:clustername], *nodes_options
+    auth_user, PCS, "cluster", "setup", "--enable", "--start", "--async",
+    "--name",  params[:clustername], *nodes_options
   )
   if retval != 0
     return [
@@ -1463,9 +1466,13 @@ def update_resource (params, request, auth_user)
       end
       resource_group = params[:resource_group]
     end
-    # workaround for Error: this command is not sufficient for create remote
-    # connection, use 'pcs cluster node add-remote', use --force to override
-    cmd << "--force"
+    if params[:resource_type] == "ocf:pacemaker:remote"
+      # Workaround for Error: this command is not sufficient for create remote
+      # connection, use 'pcs cluster node add-remote', use --force to override.
+      # It is not possible to specify meta attributes so we don't need to take
+      # care of those.
+      cmd << "--force"
+    end
     out, stderr, retval = run_cmd(auth_user, *cmd)
     if retval != 0
       return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb
index 24ee059..1a41ab2 100644
--- a/pcsd/ssl.rb
+++ b/pcsd/ssl.rb
@@ -99,8 +99,10 @@ def run_server(server, webrick_options, secondary_addrs)
   rescue Errno::EADDRNOTAVAIL, Errno::EADDRINUSE => e
     $logger.error 'Unable to bind to specified address(es), exiting'
     $logger.error e.message
+    exit 1
   rescue SocketError => e
     $logger.error e.message
+    exit 1
   end
 end
 
diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
index ceb2b7d..79e6909 100644
--- a/pcsd/views/main.erb
+++ b/pcsd/views/main.erb
@@ -848,6 +848,10 @@ Use the 'Add' button to submit the form.">
           value=param.cur_val
           placeholder=param.default
       }}
+      {{#if param.pcs_deprecated_warning}}
+        <br />
+        <span class="status-warning">{{param.pcs_deprecated_warning}}</span>
+      {{/if}}
     </td>
   </script>
 
diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb
index 39ab41f..2b12aaa 100644
--- a/pcsd/views/manage.erb
+++ b/pcsd/views/manage.erb
@@ -222,6 +222,9 @@
       <table>
 	<% transport_desc = "\
 Enables either udpu (unicast) or udp (multicast) cluster communication (default: udpu)"%>
+	<% encryption_desc = "\
+Create a cluster with encrypted corosync communication. This option may not work \
+with pcs versions lower than 0.9.159." %>
 	<% wait_for_all_desc = "\
 Enables Wait For All (WFA) feature (default: off).
 
@@ -345,6 +348,15 @@ Specify ring 1 address for each node if you want to use RRP." %>
             </select>
           </td>
         </tr>
+        <tr title="<%= h(encryption_desc) %>"><td align=right>Encryption:</td>
+          <td>
+            <select name="encryption">
+              <option selected="selected">(Default)</option>
+              <option value="1">On</option>
+              <option value="0">Off</option>
+            </select>
+          </td>
+        </tr>
 	<tr title="<%= h(wait_for_all_desc) %>"><td align=right>Wait for All:</td><td><input type=checkbox name="config-wait_for_all"></td></tr>
 	<tr title="<%= h(auto_tie_desc) %>"><td align=right>Auto Tie Breaker:</td><td><input type=checkbox name="config-auto_tie_breaker"></td></tr>
 	<tr title="<%= h(last_man_desc) %>"><td align=right>Last Man Standing:</td><td><input type=checkbox name="config-last_man_standing"></td></tr>
diff --git a/setup.py b/setup.py
index 1525aae..df385cb 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ class CleanCommand(Command):
 
 setup(
     name='pcs',
-    version='0.9.158',
+    version='0.9.159',
     description='Pacemaker Configuration System',
     author='Chris Feist',
     author_email='cfeist at redhat.com',
