[Debian-ha-commits] [pcs] 01/08: New upstream version 0.9.155

Valentin Vidic vvidic-guest at moszumanska.debian.org
Sun Nov 13 15:37:14 UTC 2016


This is an automated email from the git hooks/post-receive script.

vvidic-guest pushed a commit to branch master
in repository pcs.

commit 0cfd35942ce16edc87cb7e0251c3c697cfa75c85
Author: Valentin Vidic <Valentin.Vidic at CARNet.hr>
Date:   Sun Nov 13 13:32:37 2016 +0100

    New upstream version 0.9.155
---
 CHANGELOG.md                                       |   41 +
 newversion.py                                      |    7 +-
 pcs/acl.py                                         |  529 +++----
 pcs/alert.py                                       |   20 +-
 pcs/app.py                                         |    6 +-
 pcs/cli/booth/console_report.py                    |  140 ++
 pcs/cli/booth/test/test_env.py                     |    6 +-
 pcs/cli/booth/test/test_reports.py                 |  141 ++
 pcs/cli/common/console_report.py                   |  731 +++++++++
 pcs/cli/common/env.py                              |    1 +
 pcs/cli/common/lib_wrapper.py                      |  168 +-
 pcs/cli/common/middleware.py                       |   16 +
 pcs/cli/common/reports.py                          |   65 +-
 pcs/cli/common/test/test_console_report.py         |  221 ++-
 pcs/cli/common/test/test_lib_wrapper.py            |    6 +-
 pcs/cli/common/test/test_reports.py                |   84 +
 pcs/cli/constraint_all/console_report.py           |   31 +-
 pcs/cli/constraint_all/test/test_console_report.py |   47 +-
 pcs/cluster.py                                     |  124 +-
 pcs/common/report_codes.py                         |   16 +-
 pcs/lib/booth/reports.py                           |   58 -
 pcs/lib/booth/test/test_sync.py                    |    6 -
 pcs/lib/cib/acl.py                                 |  361 ++++-
 pcs/lib/cib/alert.py                               |   30 +-
 pcs/lib/cib/nvpair.py                              |   64 +-
 pcs/lib/cib/test/test_alert.py                     |   37 +-
 pcs/lib/cib/test/test_nvpair.py                    |  142 +-
 pcs/lib/cib/tools.py                               |   29 +-
 pcs/lib/cluster_conf_facade.py                     |   59 +
 pcs/lib/commands/acl.py                            |  333 ++++
 pcs/lib/commands/alert.py                          |   44 +-
 pcs/lib/commands/resource_agent.py                 |  115 ++
 pcs/lib/commands/sbd.py                            |   61 +-
 pcs/lib/commands/stonith_agent.py                  |   45 +
 pcs/lib/commands/test/test_acl.py                  |  510 ++++++
 pcs/lib/commands/test/test_alert.py                |  154 +-
 pcs/lib/commands/test/test_booth.py                |    2 -
 pcs/lib/commands/test/test_resource_agent.py       |  362 +++++
 pcs/lib/commands/test/test_stonith_agent.py        |  212 +++
 pcs/lib/corosync/live.py                           |   14 +-
 pcs/lib/env.py                                     |   26 +-
 pcs/lib/errors.py                                  |   20 +-
 pcs/lib/external.py                                |   56 +-
 pcs/lib/reports.py                                 |  422 ++---
 pcs/lib/resource_agent.py                          |  851 ++++++----
 pcs/lib/sbd.py                                     |    9 +-
 pcs/lib/test/test_cluster_conf_facade.py           |  151 ++
 pcs/lib/test/test_pacemaker_values.py              |    7 +-
 pcs/lib/test/test_resource_agent.py                | 1631 ++++++++++++++++++++
 pcs/pcs.8                                          |   32 +-
 pcs/resource.py                                    |  804 +++++-----
 pcs/settings_default.py                            |    9 +-
 pcs/stonith.py                                     |  207 +--
 pcs/test/suite.py                                  |   66 +-
 pcs/test/test_acl.py                               |  142 +-
 pcs/test/test_alert.py                             |  151 +-
 pcs/test/test_booth.py                             |    5 +-
 pcs/test/test_cluster.py                           |   40 +-
 pcs/test/test_constraints.py                       |  105 +-
 pcs/test/test_lib_cib_acl.py                       |  984 +++++++++++-
 pcs/test/test_lib_cib_tools.py                     |   87 ++
 pcs/test/test_lib_commands_sbd.py                  |   84 +-
 pcs/test/test_lib_corosync_live.py                 |   25 +
 pcs/test/test_lib_env.py                           |   51 +-
 pcs/test/test_lib_external.py                      |   80 +-
 pcs/test/test_lib_resource_agent.py                |  893 -----------
 pcs/test/test_lib_sbd.py                           |   32 +-
 pcs/test/test_resource.py                          | 1067 ++++++++-----
 pcs/test/test_rule.py                              |    5 +-
 pcs/test/test_stonith.py                           |   53 +-
 pcs/test/tools/assertions.py                       |    2 +-
 pcs/test/tools/color_text_runner.py                |  117 --
 pcs/test/tools/color_text_runner/__init__.py       |    8 +
 pcs/test/tools/color_text_runner/format.py         |  157 ++
 pcs/test/tools/color_text_runner/result.py         |  120 ++
 pcs/test/tools/color_text_runner/writer.py         |  132 ++
 pcs/test/tools/misc.py                             |    8 +
 pcs/test/tools/pcs_runner.py                       |    2 +-
 pcs/test/tools/pcs_unittest.py                     |    1 +
 pcs/test/tools/test/__init__.py                    |    0
 pcs/test/tools/test/test_misc.py                   |   45 +
 pcs/usage.py                                       |   41 +-
 pcs/utils.py                                       |  174 +--
 pcsd/Makefile                                      |    2 +-
 pcsd/auth.rb                                       |    1 +
 pcsd/bootstrap.rb                                  |    3 +-
 pcsd/cluster_entity.rb                             |    6 +-
 pcsd/fenceagent.rb                                 |   18 +-
 pcsd/pcs.rb                                        |   73 +-
 pcsd/public/js/pcsd.js                             |    4 +-
 pcsd/remote.rb                                     |  158 +-
 pcsd/settings.rb                                   |    4 -
 pcsd/settings.rb.debian                            |    5 +-
 pcsd/views/_acls.erb                               |    4 +-
 .pylintrc => pylintrc                              |    2 +-
 setup.py                                           |    2 +-
 96 files changed, 10353 insertions(+), 3869 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
new file mode 100644
index 0000000..5b3c97b
--- /dev/null
+++ b/CHANGELOG.md
@@ -0,0 +1,41 @@
+# Change Log
+
+## [0.9.155] - 2016-11-03
+
+### Added
+- Show daemon status in `pcs status` on non-systemd machines
+- SBD support for cman clusters ([rhbz#1380352])
+- Alerts management in pcsd ([rhbz#1376480])
+
+### Changed
+- Get all information about resource and stonith agents from pacemaker. Pcs now
+  supports the same set of agents as pacemaker does. ([rhbz#1262001],
+  [ghissue#81])
+- `pcs resource create` now exits with an error if more than one resource agent
+  matches the specified short agent name instead of randomly selecting one of
+  the agents
+- Allow to remove multiple alerts and alert recipients at once
+
+### Fixed
+- When stopping a cluster with some of the nodes unreachable, stop the cluster
+  completely on all reachable nodes ([rhbz#1380372])
+- Fixed pcsd crash when rpam rubygem is installed ([ghissue#109])
+- Fixed occasional crashes / failures when using locale other than en_US.UTF8
+  ([rhbz#1387106])
+- Fixed starting and stopping cluster services on systemd machines without
+  the `service` executable ([ghissue#115])
+
+
+[ghissue#81]: https://github.com/ClusterLabs/pcs/issues/81
+[ghissue#109]: https://github.com/ClusterLabs/pcs/issues/109
+[ghissue#115]: https://github.com/ClusterLabs/pcs/issues/115
+[rhbz#1262001]: https://bugzilla.redhat.com/show_bug.cgi?id=1262001
+[rhbz#1376480]: https://bugzilla.redhat.com/show_bug.cgi?id=1376480
+[rhbz#1380352]: https://bugzilla.redhat.com/show_bug.cgi?id=1380352
+[rhbz#1380372]: https://bugzilla.redhat.com/show_bug.cgi?id=1380372
+[rhbz#1387106]: https://bugzilla.redhat.com/show_bug.cgi?id=1387106
+
+
+## [0.9.154] - 2016-09-21
+- There is no change log for this and previous releases. We are sorry.
+- Take a look at git history if you are interested.
diff --git a/newversion.py b/newversion.py
index fed9a5e..300a445 100644
--- a/newversion.py
+++ b/newversion.py
@@ -24,9 +24,10 @@ pcs_version_split = settings.pcs_version.split('.')
 pcs_version_split[2] = str(int(pcs_version_split[2]) + 1)
 new_version = ".".join(pcs_version_split)
 
-print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' setup.py"))
-print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings_default.py"))
-print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcsd/bootstrap.rb"))
+print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version+"/' setup.py"))
+print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version+"/' pcs/settings_default.py"))
+print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version+"/' pcsd/bootstrap.rb"))
+print(os.system("sed -i 's/\#\# \[Unreleased\]/\#\# ["+new_version+"] - "+datetime.date.today().strftime('%Y-%m-%d')+"/' CHANGELOG.md"))
 
 manpage_head = '.TH PCS "8" "{date}" "pcs {version}" "System Administration Utilities"'.format(
     date=datetime.date.today().strftime('%B %Y'),
diff --git a/pcs/acl.py b/pcs/acl.py
index 0378c10..b526ae9 100644
--- a/pcs/acl.py
+++ b/pcs/acl.py
@@ -12,49 +12,54 @@ from pcs import (
     usage,
     utils,
 )
-from pcs.lib.pacemaker import get_cib_xml, get_cib, replace_cib_configuration
 from pcs.lib.pacemaker_values import is_true
-from pcs.lib.cib.acl import (
-    add_permissions_to_role,
-    create_role,
-    provide_role,
-)
+from pcs.cli.common.console_report import indent
 from pcs.cli.common.errors import CmdLineInputError
 from pcs.lib.errors import LibraryError
 
-def acl_cmd(argv):
-    if len(argv) == 0:
-        argv = ["show"]
-
-    sub_cmd = argv.pop(0)
-
-    # If we're using help or show we don't upgrade, otherwise upgrade if necessary
-    if sub_cmd not in ["help","show"]:
-        utils.checkAndUpgradeCIB(2,0,0)
-
-    if (sub_cmd == "help"):
-        usage.acl(argv)
-    elif (sub_cmd == "show"):
-        acl_show(argv)
-    elif (sub_cmd == "enable"):
-        acl_enable(argv)
-    elif (sub_cmd == "disable"):
-        acl_disable(argv)
-    elif (sub_cmd == "role"):
-        acl_role(argv)
-    elif (sub_cmd == "target" or sub_cmd == "user"):
-        acl_target(argv)
-    elif sub_cmd == "group":
-        acl_target(argv, True)
-    elif sub_cmd == "permission":
-        acl_permission(argv)
+def acl_cmd(lib, argv, modifiers):
+    if len(argv) < 1:
+        sub_cmd, argv_next = "show", []
     else:
-        usage.acl()
-        sys.exit(1)
+        sub_cmd, argv_next = argv[0], argv[1:]
+
+    try:
+        if sub_cmd == "help":
+            usage.acl(argv_next)
+        elif sub_cmd == "show":
+            show_acl_config(lib, argv_next, modifiers)
+        elif sub_cmd == "enable":
+            acl_enable(argv_next)
+        elif sub_cmd == "disable":
+            acl_disable(argv_next)
+        elif sub_cmd == "role":
+            acl_role(lib, argv_next, modifiers)
+        elif sub_cmd in ["target", "user"]:
+            acl_user(lib, argv_next, modifiers)
+        elif sub_cmd == "group":
+            acl_group(lib, argv_next, modifiers)
+        elif sub_cmd == "permission":
+            acl_permission(lib, argv_next, modifiers)
+        else:
+            raise CmdLineInputError()
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(e, "acl", sub_cmd)
+
 
-def acl_show(argv):
-    dom = utils.get_cib_dom()
+def _print_list_of_objects(obj_list, transformation_fn):
+    out = []
+    for obj in obj_list:
+        out += transformation_fn(obj)
+    if out:
+        print("\n".join(out))
 
+
+def show_acl_config(lib, argv, modifiers):
+    # TODO move to lib once lib supports cluster properties
+    # enabled/disabled should be part of the structure returned
+    # by lib.acl.get_config
     properties = utils.get_set_properties(defaults=prop.get_default_properties())
     acl_enabled = properties.get("enable-acl", "").lower()
     if is_true(acl_enabled):
@@ -63,155 +68,122 @@ def acl_show(argv):
         print("ACLs are disabled, run 'pcs acl enable' to enable")
     print()
 
-    print_targets(dom)
-    print_groups(dom)
-    print_roles(dom)
+    data = lib.acl.get_config()
+    _print_list_of_objects(data.get("target_list", []), target_to_str)
+    _print_list_of_objects(data.get("group_list", []), group_to_str)
+    _print_list_of_objects(data.get("role_list", []), role_to_str)
+
 
 def acl_enable(argv):
+    # TODO move to lib once lib supports cluster properties
     prop.set_property(["enable-acl=true"])
 
 def acl_disable(argv):
+    # TODO move to lib once lib supports cluster properties
     prop.set_property(["enable-acl=false"])
 
-def acl_role(argv):
-    if len(argv) < 2:
-        usage.acl(["role"])
-        sys.exit(1)
-
-    command = argv.pop(0)
-    if command == "create":
-        try:
-            run_create_role(argv)
-        except CmdLineInputError as e:
-            utils.exit_on_cmdline_input_errror(e, 'acl', 'role create')
-        except LibraryError as e:
-            utils.process_library_reports(e.args)
-
-
-    elif command == "delete":
-        run_role_delete(argv)
-    elif command == "assign":
-        run_role_assign(argv)
-    elif command == "unassign":
-        run_role_unassign(argv)
-    else:
-        usage.acl(["role"])
-        sys.exit(1)
 
-def acl_target(argv,group=False):
-    if len(argv) < 2:
-        if group:
-            usage.acl(["group"])
-            sys.exit(1)
+def acl_role(lib, argv, modifiers):
+    if len(argv) < 1:
+        raise CmdLineInputError()
+
+    sub_cmd, argv_next = argv[0], argv[1:]
+    try:
+        if sub_cmd == "create":
+            role_create(lib, argv_next, modifiers)
+        elif sub_cmd == "delete":
+            role_delete(lib, argv_next, modifiers)
+        elif sub_cmd == "assign":
+            role_assign(lib, argv_next, modifiers)
+        elif sub_cmd == "unassign":
+            role_unassign(lib, argv_next, modifiers)
         else:
-            usage.acl(["user"])
+            usage.show("acl", ["role"])
             sys.exit(1)
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(e, "acl", "role {0}".format(sub_cmd))
 
-    dom = utils.get_cib_dom()
-    acls = utils.get_acls(dom)
-
-    command = argv.pop(0)
-    tug_id = argv.pop(0)
-    if command == "create":
-        # pcsd parses the error message in order to determine whether the id is
-        # assigned to user/group or some other cib element
-        if group and utils.dom_get_element_with_id(dom, "acl_group", tug_id):
-            utils.err("group %s already exists" % tug_id)
-        if not group and utils.dom_get_element_with_id(dom, "acl_target", tug_id):
-            utils.err("user %s already exists" % tug_id)
-        if utils.does_id_exist(dom,tug_id):
-            utils.err(tug_id + " already exists")
-
-        if group:
-            element = dom.createElement("acl_group")
-        else:
-            element = dom.createElement("acl_target")
-        element.setAttribute("id", tug_id)
-
-        acls.appendChild(element)
-        for role in argv:
-            if not utils.dom_get_element_with_id(acls, "acl_role", role):
-                utils.err("cannot find acl role: %s" % role)
-            r = dom.createElement("role")
-            r.setAttribute("id", role)
-            element.appendChild(r)
-
-        utils.replace_cib_configuration(dom)
-    elif command == "delete":
-        found = False
-        if group:
-            elist = dom.getElementsByTagName("acl_group")
+
+def acl_user(lib, argv, modifiers):
+    if len(argv) < 1:
+        raise CmdLineInputError()
+
+    sub_cmd, argv_next = argv[0], argv[1:]
+    try:
+        if sub_cmd == "create":
+            user_create(lib, argv_next, modifiers)
+        elif sub_cmd == "delete":
+            user_delete(lib, argv_next, modifiers)
         else:
-            elist = dom.getElementsByTagName("acl_target")
-
-        for elem in elist:
-            if elem.getAttribute("id") == tug_id:
-                found = True
-                elem.parentNode.removeChild(elem)
-                break
-        if not found:
-            if group:
-                utils.err("unable to find acl group: %s" % tug_id)
-            else:
-                utils.err("unable to find acl target/user: %s" % tug_id)
-        utils.replace_cib_configuration(dom)
-    else:
-        if group:
-            usage.acl(["group"])
+            usage.show("acl", ["user"])
+            sys.exit(1)
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(e, "acl", "user {0}".format(sub_cmd))
+
+
+def user_create(lib, argv, dummy_modifiers):
+    if len(argv) < 1:
+        raise CmdLineInputError()
+    user_name, role_list = argv[0], argv[1:]
+    lib.acl.create_target(user_name, role_list)
+
+
+def user_delete(lib, argv, dummy_modifiers):
+    if len(argv) != 1:
+        raise CmdLineInputError()
+    lib.acl.remove_target(argv[0])
+
+
+def acl_group(lib, argv, modifiers):
+    if len(argv) < 1:
+        raise CmdLineInputError()
+
+    sub_cmd, argv_next = argv[0], argv[1:]
+    try:
+        if sub_cmd == "create":
+            group_create(lib, argv_next, modifiers)
+        elif sub_cmd == "delete":
+            group_delete(lib, argv_next, modifiers)
         else:
-            usage.acl(["user"])
-        sys.exit(1)
+            usage.show("acl", ["group"])
+            sys.exit(1)
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(
+            e, "acl", "group {0}".format(sub_cmd)
+        )
 
-def acl_permission(argv):
+
+def group_create(lib, argv, dummy_modifiers):
     if len(argv) < 1:
-        usage.acl(["permission"])
-        sys.exit(1)
+        raise CmdLineInputError()
+    group_name, role_list = argv[0], argv[1:]
+    lib.acl.create_group(group_name, role_list)
 
-    command = argv.pop(0)
-    if command == "add":
-        try:
-            run_permission_add(argv)
-        except CmdLineInputError as e:
-            utils.exit_on_cmdline_input_errror(e, 'acl', 'permission add')
-        except LibraryError as e:
-            utils.process_library_reports(e.args)
 
-    elif command == "delete":
-        run_permission_delete(argv)
+def group_delete(lib, argv, dummy_modifiers):
+    if len(argv) != 1:
+        raise CmdLineInputError()
+    lib.acl.remove_group(argv[0])
+
+
+def acl_permission(lib, argv, modifiers):
+    if len(argv) < 1:
+        raise CmdLineInputError()
+
+    sub_cmd, argv_next = argv[0], argv[1:]
+    try:
+        if sub_cmd == "add":
+            permission_add(lib, argv_next, modifiers)
+        elif sub_cmd == "delete":
+            run_permission_delete(lib, argv_next, modifiers)
+        else:
+            usage.show("acl", ["permission"])
+            sys.exit(1)
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(
+            e, "acl", "permission {0}".format(sub_cmd)
+        )
 
-    else:
-        usage.acl(["permission"])
-        sys.exit(1)
-
-def print_groups(dom):
-    for elem in dom.getElementsByTagName("acl_group"):
-        print("Group: " + elem.getAttribute("id"))
-        role_list = []
-        for role in elem.getElementsByTagName("role"):
-            role_list.append(role.getAttribute("id"))
-        print(" ".join(["  Roles:"] + role_list))
-
-def print_targets(dom):
-    for elem in dom.getElementsByTagName("acl_target"):
-        print("User: " + elem.getAttribute("id"))
-        role_list = []
-        for role in elem.getElementsByTagName("role"):
-            role_list.append(role.getAttribute("id"))
-        print(" ".join(["  Roles:"] + role_list))
-
-def print_roles(dom):
-    for elem in dom.getElementsByTagName("acl_role"):
-        print("Role: " + elem.getAttribute("id"))
-        if elem.getAttribute("description"):
-            print("  Description: " + elem.getAttribute("description"))
-        for perm in elem.getElementsByTagName("acl_permission"):
-            perm_name = "  Permission: " + perm.getAttribute("kind")
-            if "xpath" in perm.attributes.keys():
-                perm_name += " xpath " + perm.getAttribute("xpath")
-            elif "reference" in perm.attributes.keys():
-                perm_name += " id " + perm.getAttribute("reference")
-            perm_name += " (" + perm.getAttribute("id") + ")"
-            print(perm_name)
 
 def argv_to_permission_info_list(argv):
     if len(argv) % 3 != 0:
@@ -236,9 +208,11 @@ def argv_to_permission_info_list(argv):
 
     return permission_info_list
 
-def run_create_role(argv):
+
+def role_create(lib, argv, modifiers):
     if len(argv) < 1:
         raise CmdLineInputError()
+
     role_id = argv.pop(0)
     description = ""
     desc_key = 'description='
@@ -246,142 +220,109 @@ def run_create_role(argv):
         description = argv.pop(0)[len(desc_key):]
     permission_info_list = argv_to_permission_info_list(argv)
 
-    cib = get_cib(get_cib_xml(utils.cmd_runner()))
-    create_role(cib, role_id, description)
-    add_permissions_to_role(cib, role_id, permission_info_list)
-    replace_cib_configuration(utils.cmd_runner(), cib)
+    lib.acl.create_role(role_id, permission_info_list, description)
 
-def run_role_delete(argv):
-    if len(argv) < 1:
-        usage.acl(["role delete"])
-        sys.exit(1)
 
-    role_id = argv.pop(0)
-    dom = utils.get_cib_dom()
-    found = False
-    for elem in dom.getElementsByTagName("acl_role"):
-        if elem.getAttribute("id") == role_id:
-            found = True
-            elem.parentNode.removeChild(elem)
-            break
-    if not found:
-        utils.err("unable to find acl role: %s" % role_id)
-
-    # Remove any references to this role in acl_target or acl_group
-    for elem in dom.getElementsByTagName("role"):
-        if elem.getAttribute("id") == role_id:
-            user_group = elem.parentNode
-            user_group.removeChild(elem)
-            if "--autodelete" in utils.pcs_options:
-                if not user_group.getElementsByTagName("role"):
-                    user_group.parentNode.removeChild(user_group)
-
-    utils.replace_cib_configuration(dom)
-
-def run_role_assign(argv):
-    if len(argv) < 2:
-        usage.acl(["role assign"])
-        sys.exit(1)
-
-    if len(argv) == 2:
-        role_id = argv[0]
-        ug_id = argv[1]
-    elif len(argv) > 2 and argv[1] == "to":
-        role_id = argv[0]
-        ug_id = argv[2]
-    else:
-        usage.acl(["role assign"])
-        sys.exit(1)
-
-    dom = utils.get_cib_dom()
-    found = False
-    for role in dom.getElementsByTagName("acl_role"):
-        if role.getAttribute("id") == role_id:
-            found = True
-            break
-
-    if not found:
-        utils.err("cannot find role: %s" % role_id)
-
-    found = False
-    for ug in dom.getElementsByTagName("acl_target") + dom.getElementsByTagName("acl_group"):
-        if ug.getAttribute("id") == ug_id:
-            found = True
-            break
-
-    if not found:
-        utils.err("cannot find user or group: %s" % ug_id)
-
-    for current_role in ug.getElementsByTagName("role"):
-        if current_role.getAttribute("id") == role_id:
-            utils.err(role_id + " is already assigned to " + ug_id)
-
-    new_role = dom.createElement("role")
-    new_role.setAttribute("id", role_id)
-    ug.appendChild(new_role)
-    utils.replace_cib_configuration(dom)
-
-def run_role_unassign(argv):
-    if len(argv) < 2:
-        usage.acl(["role unassign"])
-        sys.exit(1)
+def role_delete(lib, argv, modifiers):
+    if len(argv) != 1:
+        raise CmdLineInputError()
 
-    role_id = argv.pop(0)
-    if len(argv) > 1 and argv[0] == "from":
-        ug_id = argv[1]
-    else:
-        ug_id = argv[0]
+    lib.acl.remove_role(argv[0], autodelete_users_groups=True)
 
-    dom = utils.get_cib_dom()
-    found = False
-    for ug in dom.getElementsByTagName("acl_target") + dom.getElementsByTagName("acl_group"):
-        if ug.getAttribute("id") == ug_id:
-            found = True
-            break
 
-    if not found:
-        utils.err("cannot find user or group: %s" % ug_id)
+def _role_assign_unassign(argv, keyword, not_specific_fn, user_fn, group_fn):
+    argv_len = len(argv)
+    if argv_len < 2:
+        raise CmdLineInputError()
 
-    found = False
-    for current_role in ug.getElementsByTagName("role"):
-        if current_role.getAttribute("id") == role_id:
-            found = True
-            current_role.parentNode.removeChild(current_role)
-            break
+    if argv_len == 2:
+        not_specific_fn(*argv)
+    elif argv_len == 3:
+        role_id, something, ug_id = argv
+        if something == keyword:
+            not_specific_fn(role_id, ug_id)
+        elif something == "user":
+            user_fn(role_id, ug_id)
+        elif something == "group":
+            group_fn(role_id, ug_id)
+        else:
+            raise CmdLineInputError()
+    elif argv_len == 4 and argv[1] == keyword and argv[2] in ["group", "user"]:
+        role_id, _, user_group, ug_id = argv
+        if user_group == "user":
+            user_fn(role_id, ug_id)
+        else:
+            group_fn(role_id, ug_id)
+    else:
+        raise CmdLineInputError()
 
-    if not found:
-        utils.err("cannot find role: %s, assigned to user/group: %s" % (role_id, ug_id))
 
-    if "--autodelete" in utils.pcs_options:
-        if not ug.getElementsByTagName("role"):
-            ug.parentNode.removeChild(ug)
+def role_assign(lib, argv, dummy_modifiers):
+    _role_assign_unassign(
+        argv,
+        "to",
+        lib.acl.assign_role_not_specific,
+        lib.acl.assign_role_to_target,
+        lib.acl.assign_role_to_group
+    )
+
+
+def role_unassign(lib, argv, modifiers):
+    _role_assign_unassign(
+        argv,
+        "from",
+        lambda role_id, ug_id: lib.acl.unassign_role_not_specific(
+            role_id, ug_id, modifiers.get("autodelete", False)
+        ),
+        lambda role_id, ug_id: lib.acl.unassign_role_from_target(
+            role_id, ug_id, modifiers.get("autodelete", False)
+        ),
+        lambda role_id, ug_id: lib.acl.unassign_role_from_group(
+            role_id, ug_id, modifiers.get("autodelete", False)
+        )
+    )
+
+
+def permission_add(lib, argv, dummy_modifiers):
+    if len(argv) < 4:
+        raise CmdLineInputError()
+    role_id, argv_next = argv[0], argv[1:]
+    lib.acl.add_permission(role_id, argv_to_permission_info_list(argv_next))
 
-    utils.replace_cib_configuration(dom)
 
-def run_permission_add(argv):
-    if len(argv) < 4:
+def run_permission_delete(lib, argv, dummy_modifiers):
+    if len(argv) != 1:
         raise CmdLineInputError()
-    role_id = argv.pop(0)
-    permission_info_list = argv_to_permission_info_list(argv)
+    lib.acl.remove_permission(argv[0])
 
-    cib = get_cib(get_cib_xml(utils.cmd_runner()))
-    provide_role(cib, role_id)
-    add_permissions_to_role(cib, role_id, permission_info_list)
-    replace_cib_configuration(utils.cmd_runner(), cib)
 
-def run_permission_delete(argv):
-    dom = utils.get_cib_dom()
-    if len(argv) < 1:
-        usage.acl(["permission delete"])
-        sys.exit(1)
-
-    perm_id = argv.pop(0)
-    found = False
-    for elem in dom.getElementsByTagName("acl_permission"):
-        if elem.getAttribute("id") == perm_id:
-            elem.parentNode.removeChild(elem)
-            found = True
-    if not found:
-        utils.err("Unable to find permission with id: %s" % perm_id)
-
-    utils.replace_cib_configuration(dom)
+def _target_group_to_str(type_name, obj):
+    return ["{0}: {1}".format(type_name.title(), obj.get("id"))] + indent(
+        [" ".join(["Roles:"] + obj.get("role_list", []))]
+    )
+
+def target_to_str(target):
+    return _target_group_to_str("user", target)
+
+
+def group_to_str(group):
+    return _target_group_to_str("group", group)
+
+
+def role_to_str(role):
+    out = []
+    if role.get("description"):
+        out.append("Description: {0}".format(role.get("description")))
+    out += map(_permission_to_str, role.get("permission_list", []))
+    return ["Role: {0}".format(role.get("id"))] + indent(out)
+
+
+def _permission_to_str(permission):
+    out = ["Permission:", permission.get("kind")]
+    if permission.get("xpath") is not None:
+        out += ["xpath", permission.get("xpath")]
+    elif permission.get("reference") is not None:
+        out += ["id", permission.get("reference")]
+    out.append("({0})".format(permission.get("id")))
+    return " ".join(out)
+
diff --git a/pcs/alert.py b/pcs/alert.py
index 17f4e8d..116fde1 100644
--- a/pcs/alert.py
+++ b/pcs/alert.py
@@ -6,6 +6,7 @@ from __future__ import (
 )
 
 import sys
+import json
 from functools import partial
 
 from pcs import (
@@ -38,6 +39,8 @@ def alert_cmd(*args):
             print_alert_config(*args)
         elif sub_cmd == "recipient":
             recipient_cmd(*args)
+        elif sub_cmd == "get_all_alerts":
+            print_alerts_in_json(*args)
         else:
             raise CmdLineInputError()
     except LibraryError as e:
@@ -118,10 +121,10 @@ def alert_update(lib, argv, modifiers):
 
 
 def alert_remove(lib, argv, modifiers):
-    if len(argv) != 1:
+    if len(argv) < 1:
         raise CmdLineInputError()
 
-    lib.alert.remove_alert(argv[0])
+    lib.alert.remove_alert(argv)
 
 
 def recipient_add(lib, argv, modifiers):
@@ -166,10 +169,10 @@ def recipient_update(lib, argv, modifiers):
 
 
 def recipient_remove(lib, argv, modifiers):
-    if len(argv) != 1:
+    if len(argv) < 1:
         raise CmdLineInputError()
 
-    lib.alert.remove_recipient(argv[0])
+    lib.alert.remove_recipient(argv)
 
 
 def _nvset_to_str(nvset_obj):
@@ -230,3 +233,12 @@ def print_alert_config(lib, argv, modifiers):
             print("\n".join(indent(_alert_to_str(alert), 1)))
     else:
         print(" No alerts defined")
+
+
+def print_alerts_in_json(lib, argv, dummy_modifiers):
+    # This is used only by pcsd, will be removed in new architecture
+    if argv:
+        raise CmdLineInputError()
+
+    print(json.dumps(lib.alert.get_all_alerts()))
+
diff --git a/pcs/app.py b/pcs/app.py
index ab9e970..23bd24c 100644
--- a/pcs/app.py
+++ b/pcs/app.py
@@ -181,7 +181,11 @@ def main(argv=None):
         "stonith": stonith.stonith_cmd,
         "property": prop.property_cmd,
         "constraint": constraint.constraint_cmd,
-        "acl": acl.acl_cmd,
+        "acl": lambda argv: acl.acl_cmd(
+            utils.get_library_wrapper(),
+            argv,
+            utils.get_modificators()
+        ),
         "status": status.status_cmd,
         "config": config.config_cmd,
         "pcsd": pcsd.pcsd_cmd,
diff --git a/pcs/cli/booth/console_report.py b/pcs/cli/booth/console_report.py
new file mode 100644
index 0000000..5dd0397
--- /dev/null
+++ b/pcs/cli/booth/console_report.py
@@ -0,0 +1,140 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.common import report_codes as codes
+
+def format_booth_default(value, template):
+    return "" if value in ("booth", "", None) else template.format(value)
+
+#Each value (callable taking report_item.info) returns a string template.
+#Optionally the template can contain the placeholder {force} for next processing.
+#Placeholder {force} will be appended if necessary and not already present
+CODE_TO_MESSAGE_BUILDER_MAP = {
+    codes.BOOTH_LACK_OF_SITES: lambda info:
+        "lack of sites for booth configuration (need 2 at least): sites {0}"
+        .format(", ".join(info["sites"]) if info["sites"] else "missing")
+    ,
+
+    codes.BOOTH_EVEN_PEERS_NUM: lambda info:
+        "odd number of peers is required (entered {number} peers)"
+        .format(**info)
+    ,
+
+    codes.BOOTH_ADDRESS_DUPLICATION: lambda info:
+        "duplicate address for booth configuration: {0}"
+        .format(", ".join(info["addresses"]))
+    ,
+
+    codes.BOOTH_CONFIG_UNEXPECTED_LINES: lambda info:
+        "unexpected line appeared in config: \n{0}"
+        .format("\n".join(info["line_list"]))
+    ,
+
+    codes.BOOTH_INVALID_NAME: lambda info:
+        "booth name '{name}' is not valid ({reason})"
+        .format(**info)
+    ,
+
+    codes.BOOTH_TICKET_NAME_INVALID: lambda info:
+        "booth ticket name '{0}' is not valid, use alphanumeric chars or dash"
+        .format(info["ticket_name"])
+    ,
+
+    codes.BOOTH_TICKET_DUPLICATE: lambda info:
+        "booth ticket name '{ticket_name}' already exists in configuration"
+        .format(**info)
+    ,
+
+    codes.BOOTH_TICKET_DOES_NOT_EXIST: lambda info:
+        "booth ticket name '{ticket_name}' does not exist"
+        .format(**info)
+    ,
+
+    codes.BOOTH_ALREADY_IN_CIB: lambda info:
+        "booth instance '{name}' is already created as cluster resource"
+        .format(**info)
+    ,
+
+    codes.BOOTH_NOT_EXISTS_IN_CIB: lambda info:
+        "booth instance '{name}' not found in cib"
+        .format(**info)
+    ,
+
+    codes.BOOTH_CONFIG_IS_USED: lambda info:
+        "booth instance '{0}' is used{1}".format(
+            info["name"],
+            " {0}".format(info["detail"]) if info["detail"] else "",
+        )
+    ,
+
+    codes.BOOTH_MULTIPLE_TIMES_IN_CIB: lambda info:
+        "found more than one booth instance '{name}' in cib"
+        .format(**info)
+    ,
+
+    codes.BOOTH_CONFIG_DISTRIBUTION_STARTED: lambda info:
+        "Sending booth configuration to cluster nodes..."
+    ,
+
+    codes.BOOTH_CONFIG_ACCEPTED_BY_NODE: lambda info:
+        "{node_info}Booth config{desc} saved.".format(
+            desc=(
+                "" if info["name_list"] in [None, [], ["booth"]]
+                else "(s) ({0})".format(", ".join(info["name_list"]))
+            ),
+            node_info="{0}: ".format(info["node"]) if info["node"] else ""
+        )
+    ,
+
+    codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR: lambda info:
+        "Unable to save booth config{desc} on node '{node}': {reason}".format(
+            desc=format_booth_default(info["name"], " ({0})"),
+            **info
+        )
+    ,
+
+    codes.BOOTH_CONFIG_READ_ERROR: lambda info:
+        "Unable to read booth config{desc}.".format(
+            desc=format_booth_default(info["name"], " ({0})")
+        )
+    ,
+
+    codes.BOOTH_FETCHING_CONFIG_FROM_NODE: lambda info:
+        "Fetching booth config{desc} from node '{node}'...".format(
+            desc=format_booth_default(info["config"], " '{0}'"),
+            **info
+        )
+    ,
+
+    codes.BOOTH_DAEMON_STATUS_ERROR: lambda info:
+        "unable to get status of booth daemon: {reason}".format(**info)
+    ,
+
+    codes.BOOTH_TICKET_STATUS_ERROR: "unable to get status of booth tickets",
+
+    codes.BOOTH_PEERS_STATUS_ERROR: "unable to get status of booth peers",
+
+    codes.BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP: lambda info:
+        "cannot determine local site ip, please specify site parameter"
+    ,
+
+    codes.BOOTH_TICKET_OPERATION_FAILED: lambda info:
+        (
+            "unable to {operation} booth ticket '{ticket_name}'"
+            " for site '{site_ip}', reason: {reason}"
+        ).format(**info)
+
+    ,
+
+    codes.BOOTH_SKIPPING_CONFIG: lambda info:
+        "Skipping config file '{config_file}': {reason}".format(**info)
+    ,
+
+    codes.BOOTH_CANNOT_IDENTIFY_KEYFILE:
+        "cannot identify authfile in booth configuration"
+    ,
+}
diff --git a/pcs/cli/booth/test/test_env.py b/pcs/cli/booth/test/test_env.py
index b1d80aa..e1e59e2 100644
--- a/pcs/cli/booth/test/test_env.py
+++ b/pcs/cli/booth/test/test_env.py
@@ -79,13 +79,13 @@ class BoothConfTest(TestCase):
         self, mock_is_file, mock_console_report
     ):
         next_in_line = mock.Mock(side_effect=LibraryEnvError(
-            ReportItem.error(report_codes.FILE_DOES_NOT_EXIST, "", info={
+            ReportItem.error(report_codes.FILE_DOES_NOT_EXIST, info={
                 "file_role": env_file_role_codes.BOOTH_CONFIG,
             }),
-            ReportItem.error(report_codes.FILE_DOES_NOT_EXIST, "", info={
+            ReportItem.error(report_codes.FILE_DOES_NOT_EXIST, info={
                 "file_role": env_file_role_codes.BOOTH_KEY,
             }),
-            ReportItem.error("OTHER ERROR", "", info={}),
+            ReportItem.error("OTHER ERROR", info={}),
         ))
         mock_is_file.return_value = False
         mock_env = mock.MagicMock()
diff --git a/pcs/cli/booth/test/test_reports.py b/pcs/cli/booth/test/test_reports.py
new file mode 100644
index 0000000..9e90ffd
--- /dev/null
+++ b/pcs/cli/booth/test/test_reports.py
@@ -0,0 +1,141 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.cli.booth.console_report import CODE_TO_MESSAGE_BUILDER_MAP
+from pcs.common import report_codes as codes
+
+
+class BoothConfigAcceptedByNodeTest(TestCase):
+    def setUp(self):
+        self.build = CODE_TO_MESSAGE_BUILDER_MAP[
+            codes.BOOTH_CONFIG_ACCEPTED_BY_NODE
+        ]
+
+    def test_create_message_with_empty_name_list(self):
+        self.assertEqual("Booth config saved.", self.build({
+            "node": None,
+            "name_list": [],
+        }))
+
+    def test_create_message_with_name_booth_only(self):
+        self.assertEqual("Booth config saved.", self.build({
+            "node": None,
+            "name_list": ["booth"],
+        }))
+
+    def test_create_message_with_single_name(self):
+        self.assertEqual("Booth config(s) (some) saved.", self.build({
+            "node": None,
+            "name_list": ["some"],
+        }))
+
+    def test_create_message_with_multiple_names(self):
+        self.assertEqual("Booth config(s) (some, another) saved.", self.build({
+            "node": None,
+            "name_list": ["some", "another"],
+        }))
+
+    def test_create_message_with_node(self):
+        self.assertEqual(
+            "node1: Booth config(s) (some, another) saved.",
+            self.build({
+                "node": "node1",
+                "name_list": ["some", "another"],
+            }),
+        )
+
+class BoothConfigDistributionNodeErrorTest(TestCase):
+    def setUp(self):
+        self.build = CODE_TO_MESSAGE_BUILDER_MAP[
+            codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR
+        ]
+
+    def test_create_message_for_empty_name(self):
+        self.assertEqual(
+            "Unable to save booth config on node 'node1': reason1",
+            self.build({
+                "node": "node1",
+                "reason": "reason1",
+                "name": None,
+            })
+        )
+
+    def test_create_message_for_booth_name(self):
+        self.assertEqual(
+            "Unable to save booth config on node 'node1': reason1",
+            self.build({
+                "node": "node1",
+                "reason": "reason1",
+                "name": "booth",
+            })
+        )
+
+    def test_create_message_for_another_name(self):
+        self.assertEqual(
+            "Unable to save booth config (another) on node 'node1': reason1",
+            self.build({
+                "node": "node1",
+                "reason": "reason1",
+                "name": "another",
+            })
+        )
+
+class BoothConfigReadErrorTest(TestCase):
+    def setUp(self):
+        self.build = CODE_TO_MESSAGE_BUILDER_MAP[
+            codes.BOOTH_CONFIG_READ_ERROR
+        ]
+
+    def test_create_message_for_empty_name(self):
+        self.assertEqual("Unable to read booth config.", self.build({
+            "name": None,
+        }))
+
+    def test_create_message_for_booth_name(self):
+        self.assertEqual("Unable to read booth config.", self.build({
+            "name": "booth",
+        }))
+
+    def test_create_message_for_another_name(self):
+        self.assertEqual("Unable to read booth config (another).", self.build({
+            "name": "another",
+        }))
+
+class BoothFetchingConfigFromNodeTest(TestCase):
+    def setUp(self):
+        self.build = CODE_TO_MESSAGE_BUILDER_MAP[
+            codes.BOOTH_FETCHING_CONFIG_FROM_NODE
+        ]
+
+    def test_create_message_for_empty_name(self):
+        self.assertEqual(
+            "Fetching booth config from node 'node1'...",
+            self.build({
+                "config": None,
+                "node": "node1",
+            })
+        )
+
+    def test_create_message_for_booth_name(self):
+        self.assertEqual(
+            "Fetching booth config from node 'node1'...",
+            self.build({
+                "config": "booth",
+                "node": "node1",
+            })
+        )
+
+    def test_create_message_for_another_name(self):
+        self.assertEqual(
+            "Fetching booth config 'another' from node 'node1'...",
+            self.build({
+                "config": "another",
+                "node": "node1",
+            })
+        )
diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
index e600168..643550f 100644
--- a/pcs/cli/common/console_report.py
+++ b/pcs/cli/common/console_report.py
@@ -6,7 +6,13 @@ from __future__ import (
 )
 
 import sys
+from functools import partial
 
+from pcs.common import report_codes as codes
+from collections import Iterable
+
+INSTANCE_SUFFIX = "@{0}"
+NODE_PREFIX = "{0}: "
 
 def warn(message):
     sys.stdout.write(format_message(message, "Warning: "))
@@ -28,3 +34,728 @@ def indent(line_list, indent_step=2):
         "{0}{1}".format(" "*indent_step, line) if line else line
         for line in line_list
     ]
+
+def format_optional(value, template):
+    return  "" if not value else template.format(value)
+
+def service_operation_started(operation, info):
+    return "{operation}{service}{instance_suffix}...".format(
+        operation=operation,
+        instance_suffix=format_optional(info["instance"], INSTANCE_SUFFIX),
+        **info
+    )
+
+def service_operation_error(operation, info):
+    return (
+        "{node_prefix}Unable to {operation} {service}{instance_suffix}:"
+        " {reason}"
+    ).format(
+        operation=operation,
+        instance_suffix=format_optional(info["instance"], INSTANCE_SUFFIX),
+        node_prefix=format_optional(info["node"], NODE_PREFIX),
+        **info
+    )
+
+def service_opration_success(operation, info):
+    return "{node_prefix}{service}{instance_suffix} {operation}".format(
+        operation=operation,
+        instance_suffix=format_optional(info["instance"], INSTANCE_SUFFIX),
+        node_prefix=format_optional(info["node"], NODE_PREFIX),
+        **info
+    )
+
+def service_operation_skipped(operation, info):
+    return (
+        "{node_prefix}not {operation}{service}{instance_suffix} - {reason}"
+    ).format(
+        operation=operation,
+        instance_suffix=format_optional(info["instance"], INSTANCE_SUFFIX),
+        node_prefix=format_optional(info["node"], NODE_PREFIX),
+        **info
+    )
+
+
+#Each value (callable taking report_item.info) returns a string template.
+#Optionally the template can contain the placeholder {force} for next processing.
+#Placeholder {force} will be appended if necessary and not already present
+CODE_TO_MESSAGE_BUILDER_MAP = {
+
+    codes.COMMON_ERROR: lambda info: info["text"],
+
+    codes.COMMON_INFO: lambda info: info["text"],
+
+    codes.EMPTY_RESOURCE_SET_LIST: "Resource set list is empty",
+
+    codes.REQUIRED_OPTION_IS_MISSING: lambda info:
+        "required option '{option_name}' is missing"
+        .format(**info)
+    ,
+
+    codes.INVALID_OPTION: lambda info:
+        "invalid {desc}option '{option_name}', allowed options are: {allowed_values}"
+        .format(
+            desc=format_optional(info["option_type"], "{0} "),
+            allowed_values=", ".join(info["allowed"]),
+            **info
+        )
+    ,
+
+    codes.INVALID_OPTION_VALUE: lambda info:
+        "'{option_value}' is not a valid {option_name} value, use {hint}"
+        .format(
+            hint=(
+                ", ".join(info["allowed_values"])
+                if (
+                    isinstance(info["allowed_values"], Iterable)
+                    and
+                    not isinstance(info["allowed_values"], "".__class__)
+                )
+                else info["allowed_values"]
+            ),
+            **info
+        )
+    ,
+
+    codes.EMPTY_ID: lambda info:
+        "{id_description} cannot be empty"
+    .format(**info)
+    ,
+
+    codes.INVALID_ID: lambda info:
+        (
+            "invalid {id_description} '{id}', '{invalid_character}' "
+            "is not a valid {desc}character for a {id_description}"
+        ).format(
+            desc="first " if info["is_first_char"] else "",
+            **info
+        )
+    ,
+
+    codes.INVALID_TIMEOUT_VALUE: lambda info:
+        "'{timeout}' is not a valid number of seconds to wait"
+        .format(**info)
+    ,
+
+
+    codes.INVALID_SCORE: lambda info:
+        "invalid score '{score}', use integer or INFINITY or -INFINITY"
+        .format(**info)
+    ,
+
+
+    codes.MULTIPLE_SCORE_OPTIONS: "you cannot specify multiple score options",
+
+
+    codes.RUN_EXTERNAL_PROCESS_STARTED: lambda info:
+        "Running: {command}\n{stdin_part}".format(
+            stdin_part=format_optional(
+                info["stdin"],
+                "--Debug Input Start--\n{0}\n--Debug Input End--\n"
+            ),
+            **info
+        )
+    ,
+
+    codes.RUN_EXTERNAL_PROCESS_FINISHED: lambda info:
+        (
+            "Finished running: {command}\n"
+            "Return value: {return_value}\n"
+            "--Debug Stdout Start--\n"
+            "{stdout}\n"
+            "--Debug Stdout End--\n"
+            "--Debug Stderr Start--\n"
+            "{stderr}\n"
+            "--Debug Stderr End--\n"
+        ).format(**info)
+    ,
+
+    codes.RUN_EXTERNAL_PROCESS_ERROR: lambda info:
+        "unable to run command {command}: {reason}"
+        .format(**info)
+    ,
+
+    codes.NODE_COMMUNICATION_STARTED: lambda info:
+        "Sending HTTP Request to: {target}\n{data_part}".format(
+            data_part=format_optional(
+                info["data"],
+                "--Debug Input Start--\n{0}\n--Debug Input End--\n"
+            ),
+            **info
+        )
+    ,
+
+    codes.NODE_COMMUNICATION_FINISHED: lambda info:
+        (
+            "Finished calling: {target}\n"
+            "Response Code: {response_code}\n"
+            "--Debug Response Start--\n"
+            "{response_data}\n"
+            "--Debug Response End--\n"
+        ).format(**info)
+    ,
+
+    codes.NODE_COMMUNICATION_NOT_CONNECTED: lambda info:
+        "Unable to connect to {node} ({reason})"
+        .format(**info)
+    ,
+
+    codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED: lambda info:
+        (
+            "Unable to authenticate to {node} ({reason}),"
+            " try running 'pcs cluster auth'"
+        )
+        .format(**info)
+    ,
+
+    codes.NODE_COMMUNICATION_ERROR_PERMISSION_DENIED: lambda info:
+        "{node}: Permission denied ({reason})"
+        .format(**info)
+    ,
+
+    codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND: lambda info:
+        "{node}: Unsupported command ({reason}), try upgrading pcsd"
+        .format(**info)
+    ,
+
+    codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL: lambda info:
+        "{node}: {reason}"
+        .format(**info)
+    ,
+
+    codes.NODE_COMMUNICATION_ERROR: lambda info:
+        "Error connecting to {node} ({reason})"
+        .format(**info)
+    ,
+
+    codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT: lambda info:
+        "Unable to connect to {node} ({reason})"
+        .format(**info)
+    ,
+
+    codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED:
+        "Sending updated corosync.conf to nodes..."
+    ,
+
+    codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE: lambda info:
+        "{node}: Succeeded"
+        .format(**info)
+    ,
+
+    codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR: lambda info:
+        "{node}: Unable to set corosync config"
+        .format(**info)
+    ,
+
+    codes.COROSYNC_NOT_RUNNING_CHECK_STARTED:
+        "Checking corosync is not running on nodes..."
+    ,
+
+    codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR: lambda info:
+        "{node}: Unable to check if corosync is not running"
+        .format(**info)
+    ,
+
+    codes.COROSYNC_NOT_RUNNING_ON_NODE: lambda info:
+        "{node}: corosync is not running"
+        .format(**info)
+    ,
+
+    codes.COROSYNC_RUNNING_ON_NODE: lambda info:
+        "{node}: corosync is running"
+        .format(**info)
+    ,
+
+    codes.COROSYNC_QUORUM_GET_STATUS_ERROR: lambda info:
+        "Unable to get quorum status: {reason}"
+        .format(**info)
+    ,
+
+    codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR: lambda info:
+        "Unable to set expected votes: {reason}"
+        .format(**info)
+    ,
+
+    codes.COROSYNC_CONFIG_RELOADED: "Corosync configuration reloaded",
+
+    codes.COROSYNC_CONFIG_RELOAD_ERROR: lambda info:
+        "Unable to reload corosync configuration: {reason}"
+        .format(**info)
+    ,
+
+    codes.UNABLE_TO_READ_COROSYNC_CONFIG: lambda info:
+        "Unable to read {path}: {reason}"
+        .format(**info)
+    ,
+
+    codes.PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE:
+        "Unable to parse corosync config: missing closing brace"
+    ,
+
+    codes.PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE:
+        "Unable to parse corosync config: unexpected closing brace"
+    ,
+
+    codes.PARSE_ERROR_COROSYNC_CONF:
+        "Unable to parse corosync config"
+    ,
+
+    codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE: lambda info:
+        "These options cannot be set when the cluster uses a quorum device: {0}"
+        .format(", ".join(sorted(info["options_names"])))
+    ,
+
+    codes.QDEVICE_ALREADY_DEFINED:
+        "quorum device is already defined"
+    ,
+
+    codes.QDEVICE_NOT_DEFINED:
+        "no quorum device is defined in this cluster"
+    ,
+
+    codes.QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED: (
+        "You need to stop the cluster or remove qdevice from the cluster to"
+        " continue"
+    ),
+
+    codes.QDEVICE_CLIENT_RELOAD_STARTED:
+        "Reloading qdevice configuration on nodes..."
+    ,
+
+    codes.QDEVICE_ALREADY_INITIALIZED: lambda info:
+        "Quorum device '{model}' has been already initialized"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_NOT_INITIALIZED: lambda info:
+        "Quorum device '{model}' has not been initialized yet"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_INITIALIZATION_SUCCESS: lambda info:
+        "Quorum device '{model}' initialized"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_INITIALIZATION_ERROR: lambda info:
+        "Unable to initialize quorum device '{model}': {reason}"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED:
+        "Setting up qdevice certificates on nodes..."
+    ,
+
+    codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE: lambda info:
+        "{node}: Succeeded"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED:
+        "Removing qdevice certificates from nodes..."
+    ,
+
+    codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE: lambda info:
+        "{node}: Succeeded"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_CERTIFICATE_IMPORT_ERROR: lambda info:
+        "Unable to import quorum device certificate: {reason}"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_CERTIFICATE_SIGN_ERROR: lambda info:
+        "Unable to sign quorum device certificate: {reason}"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_DESTROY_SUCCESS: lambda info:
+        "Quorum device '{model}' configuration files removed"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_DESTROY_ERROR: lambda info:
+        "Unable to destroy quorum device '{model}': {reason}"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_NOT_RUNNING: lambda info:
+        "Quorum device '{model}' is not running"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_GET_STATUS_ERROR: lambda info:
+        "Unable to get status of quorum device '{model}': {reason}"
+        .format(**info)
+    ,
+
+    codes.QDEVICE_USED_BY_CLUSTERS: lambda info:
+        "Quorum device is currently being used by cluster(s): {cluster_list}"
+        .format(cluster_list=", ".join(info["clusters"]))
+    ,
+
+    codes.CMAN_UNSUPPORTED_COMMAND:
+        "This command is not supported on CMAN clusters"
+    ,
+
+    codes.ID_ALREADY_EXISTS: lambda info:
+        "'{id}' already exists"
+        .format(**info)
+    ,
+
+    codes.ID_NOT_FOUND: lambda info:
+        "{desc}'{id}' does not exist"
+        .format(
+            desc=format_optional(info["id_description"], "{0} "),
+            **info
+        )
+    ,
+
+    codes.RESOURCE_DOES_NOT_EXIST: lambda info:
+        "Resource '{resource_id}' does not exist"
+        .format(**info)
+    ,
+
+    codes.CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET: lambda info:
+        "Role '{role_id}' is already asigned to '{target_id}'"
+        .format(**info)
+    ,
+
+    codes.CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET: lambda info:
+        "Role '{role_id}' is not assigned to '{target_id}'"
+        .format(**info)
+    ,
+
+    codes.CIB_ACL_TARGET_ALREADY_EXISTS: lambda info:
+        "'{target_id}' already exists"
+        .format(**info)
+    ,
+
+    codes.CIB_LOAD_ERROR: "unable to get cib",
+
+    codes.CIB_LOAD_ERROR_SCOPE_MISSING: lambda info:
+        "unable to get cib, scope '{scope}' not present in cib"
+        .format(**info)
+    ,
+
+    codes.CIB_LOAD_ERROR_BAD_FORMAT:
+       "unable to get cib, xml does not conform to the schema"
+    ,
+
+    codes.CIB_CANNOT_FIND_MANDATORY_SECTION: lambda info:
+        "Unable to get {section} section of cib"
+        .format(**info)
+    ,
+
+    codes.CIB_PUSH_ERROR: lambda info:
+        "Unable to update cib\n{reason}\n{pushed_cib}"
+        .format(**info)
+    ,
+
+    codes.CRM_MON_ERROR:
+        "error running crm_mon, is pacemaker running?"
+    ,
+
+    codes.BAD_CLUSTER_STATE_FORMAT:
+        "cannot load cluster status, xml does not conform to the schema"
+    ,
+
+    codes.RESOURCE_WAIT_NOT_SUPPORTED:
+        "crm_resource does not support --wait, please upgrade pacemaker"
+    ,
+
+    codes.RESOURCE_WAIT_TIMED_OUT: lambda info:
+        "waiting timeout\n\n{reason}"
+        .format(**info)
+    ,
+
+    codes.RESOURCE_WAIT_ERROR: lambda info:
+        "{reason}"
+        .format(**info)
+    ,
+
+    codes.RESOURCE_CLEANUP_ERROR: lambda info:
+        (
+             "Unable to cleanup resource: {resource}\n{reason}"
+             if info["resource"] else
+             "Unexpected error occured. 'crm_resource -C' error:\n{reason}"
+        ).format(**info)
+    ,
+
+    codes.RESOURCE_CLEANUP_TOO_TIME_CONSUMING: lambda info:
+        (
+             "Cleaning up all resources on all nodes will execute more "
+             "than {threshold} operations in the cluster, which may "
+             "negatively impact the responsiveness of the cluster. "
+             "Consider specifying resource and/or node"
+       ).format(**info)
+    ,
+
+    codes.NODE_NOT_FOUND: lambda info:
+        "node '{node}' does not appear to exist in configuration"
+        .format(**info)
+    ,
+
+    codes.PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND: lambda info:
+        "unable to get local node name from pacemaker: {reason}"
+        .format(**info)
+    ,
+
+    codes.RRP_ACTIVE_NOT_SUPPORTED:
+        "using a RRP mode of 'active' is not supported or tested"
+    ,
+
+    codes.IGNORED_CMAN_UNSUPPORTED_OPTION: lambda info:
+        "{option_name} ignored as it is not supported on CMAN clusters"
+        .format(**info)
+    ,
+
+    codes.NON_UDP_TRANSPORT_ADDR_MISMATCH:
+        "--addr0 and --addr1 can only be used with --transport=udp"
+    ,
+
+    codes.CMAN_UDPU_RESTART_REQUIRED: (
+        "Using udpu transport on a CMAN cluster,"
+        " cluster restart is required after node add or remove"
+    ),
+
+    codes.CMAN_BROADCAST_ALL_RINGS: (
+        "Enabling broadcast for all rings as CMAN does not support"
+        " broadcast in only one ring"
+    ),
+
+    codes.SERVICE_START_STARTED: partial(service_operation_started, "Starting"),
+    codes.SERVICE_START_ERROR: partial(service_operation_error, "start"),
+    codes.SERVICE_START_SUCCESS: partial(service_opration_success, "started"),
+    codes.SERVICE_START_SKIPPED: partial(service_operation_skipped, "starting"),
+
+    codes.SERVICE_STOP_STARTED: partial(service_operation_started, "Stopping"),
+    codes.SERVICE_STOP_ERROR: partial(service_operation_error, "stop"),
+    codes.SERVICE_STOP_SUCCESS: partial(service_opration_success, "stopped"),
+
+    codes.SERVICE_ENABLE_STARTED: partial(
+        service_operation_started, "Enabling"
+    ),
+    codes.SERVICE_ENABLE_ERROR: partial(service_operation_error, "enable"),
+    codes.SERVICE_ENABLE_SUCCESS: partial(service_opration_success, "enabled"),
+    codes.SERVICE_ENABLE_SKIPPED: partial(
+        service_operation_skipped, "enabling"
+    ),
+
+    codes.SERVICE_DISABLE_STARTED:
+        partial(service_operation_started, "Disabling")
+     ,
+    codes.SERVICE_DISABLE_ERROR: partial(service_operation_error, "disable"),
+    codes.SERVICE_DISABLE_SUCCESS: partial(service_opration_success, "disabled"),
+
+    codes.SERVICE_KILL_ERROR: lambda info:
+        "Unable to kill {service_list}: {reason}"
+        .format(
+            service_list=", ".join(info["services"]),
+            **info
+        )
+    ,
+
+    codes.SERVICE_KILL_SUCCESS: lambda info:
+        "{service_list} killed"
+        .format(
+            service_list=", ".join(info["services"]),
+            **info
+        )
+    ,
+
+    codes.UNABLE_TO_GET_AGENT_METADATA: lambda info:
+        (
+            "Agent '{agent}' is not installed or does not provide valid"
+            " metadata: {reason}"
+        ).format(**info)
+    ,
+
+    codes.INVALID_RESOURCE_AGENT_NAME: lambda info:
+        (
+            "Invalid resource agent name '{name}'."
+            " Use standard:provider:type or standard:type."
+            " List of standards and providers can be obtained by using commands"
+            " 'pcs resource standards' and 'pcs resource providers'"
+        )
+        .format(**info)
+    ,
+
+    codes.AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE: lambda info:
+        (
+            "Multiple agents match '{agent}'"
+            ", please specify full name: {possible_agents_str}"
+        ).format(**info)
+    ,
+
+    codes.AGENT_NAME_GUESS_FOUND_NONE: lambda info:
+        "Unable to find agent '{agent}', try specifying its full name"
+        .format(**info)
+    ,
+
+    codes.AGENT_NAME_GUESSED: lambda info:
+        "Assumed agent name '{guessed_name}' (deduced from '{entered_name}')"
+        .format(**info)
+    ,
+
+    codes.OMITTING_NODE: lambda info:
+        "Omitting node '{node}'"
+        .format(**info)
+    ,
+
+    codes.SBD_CHECK_STARTED: "Running SBD pre-enabling checks...",
+
+    codes.SBD_CHECK_SUCCESS: lambda info:
+        "{node}: SBD pre-enabling checks done"
+        .format(**info)
+    ,
+
+    codes.SBD_CONFIG_DISTRIBUTION_STARTED: "Distributing SBD config...",
+
+    codes.SBD_CONFIG_ACCEPTED_BY_NODE: lambda info:
+        "{node}: SBD config saved"
+        .format(**info)
+    ,
+
+    codes.UNABLE_TO_GET_SBD_CONFIG: lambda info:
+        "Unable to get SBD configuration from node '{node}'{reason_suffix}"
+        .format(
+            reason_suffix=format_optional(info["reason"], ": {0}"),
+            **info
+        )
+    ,
+
+    codes.SBD_ENABLING_STARTED: lambda info:
+        "Enabling SBD service..."
+        .format(**info)
+    ,
+
+    codes.SBD_DISABLING_STARTED: "Disabling SBD service...",
+
+    codes.INVALID_RESPONSE_FORMAT: lambda info:
+        "{node}: Invalid format of response"
+        .format(**info)
+    ,
+
+    codes.SBD_NOT_INSTALLED: lambda info:
+        "SBD is not installed on node '{node}'"
+        .format(**info)
+    ,
+
+    codes.WATCHDOG_NOT_FOUND: lambda info:
+        "Watchdog '{watchdog}' does not exist on node '{node}'"
+        .format(**info)
+    ,
+
+    codes.WATCHDOG_INVALID: lambda info:
+        "Watchdog path '{watchdog}' is invalid."
+        .format(**info)
+    ,
+
+    codes.UNABLE_TO_GET_SBD_STATUS: lambda info:
+        "Unable to get status of SBD from node '{node}'{reason_suffix}"
+        .format(
+            reason_suffix=format_optional(info["reason"], ": {0}"),
+            **info
+        )
+    ,
+
+    codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES:
+        "Cluster restart is required in order to apply these changes."
+    ,
+
+    codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS: lambda info:
+        "Recipient '{recipient}' in alert '{alert}' already exists"
+        .format(**info)
+    ,
+
+    codes.CIB_ALERT_RECIPIENT_VALUE_INVALID: lambda info:
+        "Recipient value '{recipient}' is not valid."
+        .format(**info)
+    ,
+
+    codes.CIB_ALERT_NOT_FOUND: lambda info:
+        "Alert '{alert}' not found."
+        .format(**info)
+    ,
+
+    codes.CIB_UPGRADE_SUCCESSFUL: lambda info:
+        "CIB has been upgraded to the latest schema version."
+        .format(**info)
+    ,
+
+    codes.CIB_UPGRADE_FAILED: lambda info:
+        "Upgrading of CIB to the latest schema failed: {reason}"
+        .format(**info)
+    ,
+
+    codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION: lambda info:
+        (
+            "Unable to upgrade CIB to required schema version"
+            " {required_version} or higher. Current version is"
+            " {current_version}. Newer version of pacemaker is needed."
+        )
+        .format(**info)
+    ,
+
+    codes.FILE_ALREADY_EXISTS: lambda info:
+        "{node_prefix}{role_prefix}file {file_path} already exists"
+        .format(
+            node_prefix=format_optional(info["node"], NODE_PREFIX),
+            role_prefix=format_optional(info["file_role"], "{0} "),
+            **info
+        )
+    ,
+
+    codes.FILE_DOES_NOT_EXIST: lambda info:
+        "{file_role} file {file_path} does not exist"
+        .format(**info)
+    ,
+
+    codes.FILE_IO_ERROR: lambda info:
+        "unable to {operation} {file_role}{path_desc}: {reason}"
+        .format(
+            path_desc=format_optional(info["file_path"], " '{0}'"),
+            **info
+        )
+    ,
+
+    codes.UNABLE_TO_DETERMINE_USER_UID: lambda info:
+        "Unable to determine uid of user '{user}'"
+        .format(**info)
+    ,
+
+    codes.UNABLE_TO_DETERMINE_GROUP_GID: lambda info:
+        "Unable to determine gid of group '{group}'"
+        .format(**info)
+    ,
+
+    codes.UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS:
+        "unsupported operation on non systemd systems"
+    ,
+
+    codes.LIVE_ENVIRONMENT_REQUIRED: lambda info:
+        "This command does not support {forbidden_options}"
+        .format(forbidden_options=", ".join(info["forbidden_options"]))
+    ,
+
+    codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD: lambda info:
+        "unable to disable auto_tie_breaker: SBD fencing will have no effect"
+        .format(**info)
+    ,
+
+    codes.SBD_REQUIRES_ATB: lambda info:
+        "auto_tie_breaker quorum option will be enabled to make SBD fencing "
+        "effective. Cluster has to be offline to be able to make this change."
+    ,
+
+    codes.CLUSTER_CONF_LOAD_ERROR_INVALID_FORMAT: lambda info:
+        "unable to get cluster.conf: {reason}"
+        .format(**info)
+    ,
+
+    codes.CLUSTER_CONF_READ_ERROR: lambda info:
+        "Unable to read {path}: {reason}"
+        .format(**info)
+    ,
+}
diff --git a/pcs/cli/common/env.py b/pcs/cli/common/env.py
index b1d951d..60f66a4 100644
--- a/pcs/cli/common/env.py
+++ b/pcs/cli/common/env.py
@@ -16,3 +16,4 @@ class Env(object):
         self.booth = None
         self.auth_tokens_getter = None
         self.debug = False
+        self.cluster_conf_data = None
diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
index 99bfe35..fb5a904 100644
--- a/pcs/cli/common/lib_wrapper.py
+++ b/pcs/cli/common/lib_wrapper.py
@@ -15,11 +15,14 @@ from pcs.cli.common.reports import (
     process_library_reports
 )
 from pcs.lib.commands import (
+    acl,
+    alert,
     booth,
-    quorum,
     qdevice,
+    quorum,
+    resource_agent,
     sbd,
-    alert,
+    stonith_agent,
 )
 from pcs.lib.commands.constraint import (
     colocation as constraint_colocation,
@@ -45,6 +48,7 @@ def cli_env_to_lib_env(cli_env):
         cli_env.corosync_conf_data,
         booth=cli_env.booth,
         auth_tokens_getter=cli_env.auth_tokens_getter,
+        cluster_conf_data=cli_env.cluster_conf_data,
     )
 
 def lib_env_to_cli_env(lib_env, cli_env):
@@ -53,6 +57,8 @@ def lib_env_to_cli_env(lib_env, cli_env):
         cli_env.cib_upgraded = lib_env.cib_upgraded
     if not lib_env.is_corosync_conf_live:
         cli_env.corosync_conf_data = lib_env.get_corosync_conf_data()
+    if not lib_env.is_cluster_conf_live:
+        cli_env.cluster_conf_data = lib_env.get_cluster_conf_data()
 
     #TODO
     #now we know: if is in cli_env booth is in lib_env as well
@@ -104,13 +110,69 @@ def get_module(env, middleware_factory, name):
 
 
 def load_module(env, middleware_factory, name):
-    if name == 'constraint_order':
+    if name == "acl":
         return bind_all(
             env,
             middleware.build(middleware_factory.cib),
             {
-                'set': constraint_order.create_with_set,
-                'show': constraint_order.show,
+                "create_role": acl.create_role,
+                "remove_role": acl.remove_role,
+                "assign_role_not_specific": acl.assign_role_not_specific,
+                "assign_role_to_target": acl.assign_role_to_target,
+                "assign_role_to_group": acl.assign_role_to_group,
+                "unassign_role_not_specific": acl.unassign_role_not_specific,
+                "unassign_role_from_target": acl.unassign_role_from_target,
+                "unassign_role_from_group": acl.unassign_role_from_group,
+                "create_target": acl.create_target,
+                "create_group": acl.create_group,
+                "remove_target": acl.remove_target,
+                "remove_group": acl.remove_group,
+                "add_permission": acl.add_permission,
+                "remove_permission": acl.remove_permission,
+                "get_config": acl.get_config,
+            }
+        )
+
+    if name == "alert":
+        return bind_all(
+            env,
+            middleware.build(middleware_factory.cib),
+            {
+                "create_alert": alert.create_alert,
+                "update_alert": alert.update_alert,
+                "remove_alert": alert.remove_alert,
+                "add_recipient": alert.add_recipient,
+                "update_recipient": alert.update_recipient,
+                "remove_recipient": alert.remove_recipient,
+                "get_all_alerts": alert.get_all_alerts,
+            }
+        )
+
+    if name == "booth":
+        return bind_all(
+            env,
+            middleware.build(
+                middleware_factory.booth_conf,
+                middleware_factory.cib
+            ),
+            {
+                "config_setup": booth.config_setup,
+                "config_destroy": booth.config_destroy,
+                "config_text": booth.config_text,
+                "config_ticket_add": booth.config_ticket_add,
+                "config_ticket_remove": booth.config_ticket_remove,
+                "create_in_cluster": booth.create_in_cluster,
+                "remove_from_cluster": booth.remove_from_cluster,
+                "restart": booth.restart,
+                "config_sync": booth.config_sync,
+                "enable": booth.enable_booth,
+                "disable": booth.disable_booth,
+                "start": booth.start_booth,
+                "stop": booth.stop_booth,
+                "pull": booth.pull_config,
+                "status": booth.get_status,
+                "ticket_grant": booth.ticket_grant,
+                "ticket_revoke": booth.ticket_revoke,
             }
         )
 
@@ -124,33 +186,28 @@ def load_module(env, middleware_factory, name):
             }
         )
 
-    if name == 'constraint_ticket':
+    if name == 'constraint_order':
         return bind_all(
             env,
             middleware.build(middleware_factory.cib),
             {
-                'set': constraint_ticket.create_with_set,
-                'show': constraint_ticket.show,
-                'add': constraint_ticket.create,
-                'remove': constraint_ticket.remove,
+                'set': constraint_order.create_with_set,
+                'show': constraint_order.show,
             }
         )
 
-    if name == "quorum":
+    if name == 'constraint_ticket':
         return bind_all(
             env,
-            middleware.build(middleware_factory.corosync_conf_existing),
+            middleware.build(middleware_factory.cib),
             {
-                "add_device": quorum.add_device,
-                "get_config": quorum.get_config,
-                "remove_device": quorum.remove_device,
-                "set_expected_votes_live": quorum.set_expected_votes_live,
-                "set_options": quorum.set_options,
-                "status": quorum.status_text,
-                "status_device": quorum.status_device_text,
-                "update_device": quorum.update_device,
+                'set': constraint_ticket.create_with_set,
+                'show': constraint_ticket.show,
+                'add': constraint_ticket.create,
+                'remove': constraint_ticket.remove,
             }
         )
+
     if name == "qdevice":
         return bind_all(
             env,
@@ -173,6 +230,37 @@ def load_module(env, middleware_factory, name):
                     qdevice.qdevice_net_sign_certificate_request,
             }
         )
+
+    if name == "quorum":
+        return bind_all(
+            env,
+            middleware.build(middleware_factory.corosync_conf_existing),
+            {
+                "add_device": quorum.add_device,
+                "get_config": quorum.get_config,
+                "remove_device": quorum.remove_device,
+                "set_expected_votes_live": quorum.set_expected_votes_live,
+                "set_options": quorum.set_options,
+                "status": quorum.status_text,
+                "status_device": quorum.status_device_text,
+                "update_device": quorum.update_device,
+            }
+        )
+
+    if name == "resource_agent":
+        return bind_all(
+            env,
+            middleware.build(),
+            {
+                "describe_agent": resource_agent.describe_agent,
+                "list_agents": resource_agent.list_agents,
+                "list_agents_for_standard_and_provider":
+                    resource_agent.list_agents_for_standard_and_provider,
+                "list_ocf_providers": resource_agent.list_ocf_providers,
+                "list_standards": resource_agent.list_standards,
+            }
+        )
+
     if name == "sbd":
         return bind_all(
             env,
@@ -185,46 +273,14 @@ def load_module(env, middleware_factory, name):
                 "get_local_sbd_config": sbd.get_local_sbd_config,
             }
         )
-    if name == "alert":
-        return bind_all(
-            env,
-            middleware.build(middleware_factory.cib),
-            {
-                "create_alert": alert.create_alert,
-                "update_alert": alert.update_alert,
-                "remove_alert": alert.remove_alert,
-                "add_recipient": alert.add_recipient,
-                "update_recipient": alert.update_recipient,
-                "remove_recipient": alert.remove_recipient,
-                "get_all_alerts": alert.get_all_alerts,
-            }
-        )
 
-    if name == "booth":
+    if name == "stonith_agent":
         return bind_all(
             env,
-            middleware.build(
-                middleware_factory.booth_conf,
-                middleware_factory.cib
-            ),
+            middleware.build(),
             {
-                "config_setup": booth.config_setup,
-                "config_destroy": booth.config_destroy,
-                "config_text": booth.config_text,
-                "config_ticket_add": booth.config_ticket_add,
-                "config_ticket_remove": booth.config_ticket_remove,
-                "create_in_cluster": booth.create_in_cluster,
-                "remove_from_cluster": booth.remove_from_cluster,
-                "restart": booth.restart,
-                "config_sync": booth.config_sync,
-                "enable": booth.enable_booth,
-                "disable": booth.disable_booth,
-                "start": booth.start_booth,
-                "stop": booth.stop_booth,
-                "pull": booth.pull_config,
-                "status": booth.get_status,
-                "ticket_grant": booth.ticket_grant,
-                "ticket_revoke": booth.ticket_revoke,
+                "describe_agent": stonith_agent.describe_agent,
+                "list_agents": stonith_agent.list_agents,
             }
         )
 
diff --git a/pcs/cli/common/middleware.py b/pcs/cli/common/middleware.py
index 9254a12..29a9247 100644
--- a/pcs/cli/common/middleware.py
+++ b/pcs/cli/common/middleware.py
@@ -66,5 +66,21 @@ def corosync_conf_existing(local_file_path):
         return result_of_next
     return apply
 
+
+def cluster_conf_read_only(local_file_path):
+    def apply(next_in_line, env, *args, **kwargs):
+        if local_file_path:
+            try:
+                env.cluster_conf_data = open(local_file_path).read()
+            except EnvironmentError as e:
+                raise console_report.error("Unable to read {0}: {1}".format(
+                    local_file_path,
+                    e.strerror
+                ))
+
+        return next_in_line(env, *args, **kwargs)
+    return apply
+
+
 def create_middleware_factory(**kwargs):
     return namedtuple('MiddlewareFactory', kwargs.keys())(**kwargs)
diff --git a/pcs/cli/common/reports.py b/pcs/cli/common/reports.py
index c97cf6f..3178532 100644
--- a/pcs/cli/common/reports.py
+++ b/pcs/cli/common/reports.py
@@ -6,17 +6,56 @@ from __future__ import (
 )
 
 import sys
+from functools import partial
 
-from pcs.cli.constraint_all.console_report import duplicate_constraints_report
+from pcs.cli.booth.console_report import (
+    CODE_TO_MESSAGE_BUILDER_MAP as BOOTH_CODE_TO_MESSAGE_BUILDER_MAP
+)
+from pcs.cli.common.console_report import CODE_TO_MESSAGE_BUILDER_MAP
+from pcs.cli.constraint_all.console_report import (
+    CODE_TO_MESSAGE_BUILDER_MAP as CONSTRAINT_CODE_TO_MESSAGE_BUILDER_MAP
+)
 from pcs.common import report_codes as codes
 from pcs.lib.errors import LibraryError, ReportItemSeverity
 
 
-__CODE_BUILDER_MAP = {
-    codes.DUPLICATE_CONSTRAINTS_EXIST: duplicate_constraints_report,
-}
+__CODE_BUILDER_MAP = {}
+__CODE_BUILDER_MAP.update(CODE_TO_MESSAGE_BUILDER_MAP)
+__CODE_BUILDER_MAP.update(CONSTRAINT_CODE_TO_MESSAGE_BUILDER_MAP)
+__CODE_BUILDER_MAP.update(BOOTH_CODE_TO_MESSAGE_BUILDER_MAP)
+
+def build_default_message_from_report(report_item, force_text):
+    return "Unknown report: {0} info: {1}{2}".format(
+        report_item.code,
+        str(report_item.info),
+        force_text,
+    )
+
+
+def build_message_from_report(code_builder_map, report_item, force_text=""):
+    if report_item.code not in code_builder_map:
+        return build_default_message_from_report(report_item, force_text)
+
+    template = code_builder_map[report_item.code]
+    #Sometimes the report item info is not needed for message building.
+    #In this case the template is a string. Otherwise, the template is callable.
+    if callable(template):
+        try:
+            template = template(report_item.info)
+        except(TypeError, KeyError):
+            return build_default_message_from_report(report_item, force_text)
 
 
+    #The message can contain a {force} placeholder if there is a need to have
+    #it at a specific position. Otherwise it is appended to the end (if
+    #necessary). This removes the need to explicitly specify the placeholder
+    if force_text and "{force}" not in template:
+        template += "{force}"
+
+    return template.format(force=force_text)
+
+build_report_message = partial(build_message_from_report, __CODE_BUILDER_MAP)
+
 class LibraryReportProcessorToConsole(object):
     def __init__(self, debug=False):
         self.debug = debug
@@ -30,9 +69,9 @@ class LibraryReportProcessorToConsole(object):
             if report_item.severity == ReportItemSeverity.ERROR:
                 errors.append(report_item)
             elif report_item.severity == ReportItemSeverity.WARNING:
-                print("Warning: " + _build_report_message(report_item))
+                print("Warning: " + build_report_message(report_item))
             elif self.debug or report_item.severity != ReportItemSeverity.DEBUG:
-                print(report_item.message)
+                print(build_report_message(report_item))
         if errors:
             raise LibraryError(*errors)
 
@@ -41,14 +80,6 @@ def _prepare_force_text(report_item):
         return ", use --skip-offline to override"
     return ", use --force to override" if report_item.forceable else ""
 
-def _build_report_message(report_item, force_text=""):
-    get_template = __CODE_BUILDER_MAP.get(
-        report_item.code,
-        lambda report_item: report_item.message + "{force}"
-    )
-
-    return get_template(report_item).format(force=force_text)
-
 def process_library_reports(report_item_list):
     """
     report_item_list list of ReportItem
@@ -56,14 +87,14 @@ def process_library_reports(report_item_list):
     critical_error = False
     for report_item in report_item_list:
         if report_item.severity == ReportItemSeverity.WARNING:
-            print("Warning: " + report_item.message)
+            print("Warning: " + build_report_message(report_item))
             continue
 
         if report_item.severity != ReportItemSeverity.ERROR:
-            print(report_item.message)
+            print(build_report_message(report_item))
             continue
 
-        sys.stderr.write('Error: {0}\n'.format(_build_report_message(
+        sys.stderr.write('Error: {0}\n'.format(build_report_message(
             report_item,
             _prepare_force_text(report_item)
         )))
diff --git a/pcs/cli/common/test/test_console_report.py b/pcs/cli/common/test/test_console_report.py
index 63fe55c..746dfe0 100644
--- a/pcs/cli/common/test/test_console_report.py
+++ b/pcs/cli/common/test/test_console_report.py
@@ -6,7 +6,12 @@ from __future__ import (
 )
 
 from pcs.test.tools.pcs_unittest import TestCase
-from pcs.cli.common.console_report import indent
+from pcs.cli.common.console_report import(
+    indent,
+    CODE_TO_MESSAGE_BUILDER_MAP,
+    format_optional,
+)
+from pcs.common import report_codes as codes
 
 class IndentTest(TestCase):
     def test_indent_list_of_lines(self):
@@ -20,3 +25,217 @@ class IndentTest(TestCase):
                 "  second"
             ]
         )
+
+class NameBuildTest(TestCase):
+    """
+    Mixin for the testing of message building.
+    """
+    code = None
+
+    def assert_message_from_info(self, message, info):
+        build = CODE_TO_MESSAGE_BUILDER_MAP[self.code]
+        self.assertEqual(message, build(info))
+
+
+class BuildInvalidOptionMessageTest(NameBuildTest):
+    code = codes.INVALID_OPTION
+    def test_build_message_with_type(self):
+        self.assert_message_from_info(
+            "invalid TYPE option 'NAME', allowed options are: FIRST, SECOND",
+            {
+                "option_name": "NAME",
+                "option_type": "TYPE",
+                "allowed": sorted(["FIRST", "SECOND"]),
+            }
+        )
+
+    def test_build_message_without_type(self):
+        self.assert_message_from_info(
+            "invalid option 'NAME', allowed options are: FIRST, SECOND",
+            {
+                "option_name": "NAME",
+                "option_type": "",
+                "allowed": sorted(["FIRST", "SECOND"]),
+            }
+        )
+
+class BuildInvalidOptionValueMessageTest(NameBuildTest):
+    code = codes.INVALID_OPTION_VALUE
+    def test_build_message_with_multiple_allowed_values(self):
+        self.assert_message_from_info(
+            "'VALUE' is not a valid NAME value, use FIRST, SECOND",
+            {
+                "option_name": "NAME",
+                "option_value": "VALUE",
+                "allowed_values": sorted(["FIRST", "SECOND"]),
+            }
+        )
+
+    def test_build_message_with_hint(self):
+        self.assert_message_from_info(
+            "'VALUE' is not a valid NAME value, use some hint",
+            {
+                "option_name": "NAME",
+                "option_value": "VALUE",
+                "allowed_values": "some hint",
+            }
+        )
+
+class BuildServiceStartErrorTest(NameBuildTest):
+    code = codes.SERVICE_START_ERROR
+    def test_build_message_with_instance_and_node(self):
+        self.assert_message_from_info(
+            "NODE: Unable to start SERVICE at INSTANCE: REASON",
+            {
+                "service": "SERVICE",
+                "reason": "REASON",
+                "node": "NODE",
+                "instance": "INSTANCE",
+            }
+        )
+    def test_build_message_with_instance_only(self):
+        self.assert_message_from_info(
+            "Unable to start SERVICE at INSTANCE: REASON",
+            {
+                "service": "SERVICE",
+                "reason": "REASON",
+                "node": "",
+                "instance": "INSTANCE",
+            }
+        )
+
+    def test_build_message_with_node_only(self):
+        self.assert_message_from_info(
+            "NODE: Unable to start SERVICE: REASON",
+            {
+                "service": "SERVICE",
+                "reason": "REASON",
+                "node": "NODE",
+                "instance": "",
+            }
+        )
+
+    def test_build_message_without_node_and_instance(self):
+        self.assert_message_from_info(
+            "Unable to start SERVICE: REASON",
+            {
+                "service": "SERVICE",
+                "reason": "REASON",
+                "node": "",
+                "instance": "",
+            }
+        )
+
+class BuildInvalidIdTest(NameBuildTest):
+    code = codes.INVALID_ID
+    def test_build_message_with_first_char_invalid(self):
+        self.assert_message_from_info(
+            (
+                "invalid ID_DESCRIPTION 'ID', 'INVALID_CHARACTER' is not a"
+                " valid first character for a ID_DESCRIPTION"
+            ),
+            {
+                "id_description": "ID_DESCRIPTION",
+                "id": "ID",
+                "invalid_character": "INVALID_CHARACTER",
+                "is_first_char": True,
+            }
+        )
+    def test_build_message_with_non_first_char_invalid(self):
+        self.assert_message_from_info(
+            (
+                "invalid ID_DESCRIPTION 'ID', 'INVALID_CHARACTER' is not a"
+                " valid character for a ID_DESCRIPTION"
+            ),
+            {
+                "id_description": "ID_DESCRIPTION",
+                "id": "ID",
+                "invalid_character": "INVALID_CHARACTER",
+                "is_first_char": False,
+            }
+        )
+
+class BuildRunExternalaStartedTest(NameBuildTest):
+    code = codes.RUN_EXTERNAL_PROCESS_STARTED
+
+    def test_build_message_with_stdin(self):
+        self.assert_message_from_info(
+            (
+                "Running: COMMAND\n"
+                "--Debug Input Start--\n"
+                "STDIN\n"
+                "--Debug Input End--\n"
+            ),
+            {
+                "command": "COMMAND",
+                "stdin": "STDIN",
+            }
+        )
+
+    def test_build_message_without_stdin(self):
+        self.assert_message_from_info(
+            "Running: COMMAND\n",
+            {
+                "command": "COMMAND",
+                "stdin": "",
+            }
+        )
+
+class BuildNodeCommunicationStartedTest(NameBuildTest):
+    code = codes.NODE_COMMUNICATION_STARTED
+
+    def test_build_message_with_data(self):
+        self.assert_message_from_info(
+            (
+                "Sending HTTP Request to: TARGET\n"
+                "--Debug Input Start--\n"
+                "DATA\n"
+                "--Debug Input End--\n"
+            ),
+            {
+                "target": "TARGET",
+                "data": "DATA",
+            }
+        )
+
+    def test_build_message_without_data(self):
+        self.assert_message_from_info(
+            "Sending HTTP Request to: TARGET\n",
+            {
+                "target": "TARGET",
+                "data": "",
+            }
+        )
+
+class FormatOptionalTest(TestCase):
+    def test_info_key_is_falsy(self):
+        self.assertEqual("", format_optional("", "{0}: "))
+
+    def test_info_key_is_not_falsy(self):
+        self.assertEqual("A: ", format_optional("A", "{0}: "))
+
+class AgentNameGuessedTest(NameBuildTest):
+    code = codes.AGENT_NAME_GUESSED
+    def test_build_message_with_data(self):
+        self.assert_message_from_info(
+            "Assumed agent name 'ocf:heratbeat:Delay' (deduced from 'Delay')",
+            {
+                "entered_name": "Delay",
+                "guessed_name": "ocf:heratbeat:Delay",
+            }
+        )
+
+class InvalidResourceAgentNameTest(NameBuildTest):
+    code = codes.INVALID_RESOURCE_AGENT_NAME
+    def test_build_message_with_data(self):
+        self.assert_message_from_info(
+            "Invalid resource agent name ':name'."
+                " Use standard:provider:type or standard:type."
+                " List of standards and providers can be obtained by using"
+                " commands 'pcs resource standards' and"
+                " 'pcs resource providers'"
+            ,
+            {
+                "name": ":name",
+            }
+        )
diff --git a/pcs/cli/common/test/test_lib_wrapper.py b/pcs/cli/common/test/test_lib_wrapper.py
index 149e612..f510353 100644
--- a/pcs/cli/common/test/test_lib_wrapper.py
+++ b/pcs/cli/common/test/test_lib_wrapper.py
@@ -42,9 +42,9 @@ class LibraryWrapperTest(TestCase):
 class BindTest(TestCase):
     @mock.patch("pcs.cli.common.lib_wrapper.process_library_reports")
     def test_report_unprocessed_library_env_errors(self, mock_process_report):
-        report1 = ReportItem.error("OTHER ERROR", "", info={})
-        report2 = ReportItem.error("OTHER ERROR", "", info={})
-        report3 = ReportItem.error("OTHER ERROR", "", info={})
+        report1 = ReportItem.error("OTHER ERROR", info={})
+        report2 = ReportItem.error("OTHER ERROR", info={})
+        report3 = ReportItem.error("OTHER ERROR", info={})
         e = LibraryEnvError(report1, report2, report3)
         e.sign_processed(report2)
         mock_middleware = mock.Mock(side_effect=e)
diff --git a/pcs/cli/common/test/test_reports.py b/pcs/cli/common/test/test_reports.py
new file mode 100644
index 0000000..3ea1dd2
--- /dev/null
+++ b/pcs/cli/common/test/test_reports.py
@@ -0,0 +1,84 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+from collections import namedtuple
+
+from pcs.cli.common.reports import build_message_from_report
+
+ReportItem = namedtuple("ReportItem", "code info")
+
+class BuildMessageFromReportTest(TestCase):
+    def test_returns_default_message_when_code_not_in_map(self):
+        info = {"first": "FIRST"}
+        self.assertEqual(
+            "Unknown report: SOME info: {0}force text".format(str(info)) ,
+            build_message_from_report(
+                {},
+                ReportItem("SOME", info),
+                "force text"
+            )
+        )
+
+    def test_complete_force_text(self):
+        self.assertEqual(
+            "Message force text is inside",
+            build_message_from_report(
+                {
+                    "SOME": "Message {force} is inside",
+                },
+                ReportItem("SOME", {}),
+                "force text"
+            )
+        )
+
+    def test_deal_with_callable(self):
+        self.assertEqual(
+            "Info: MESSAGE",
+            build_message_from_report(
+                {
+                    "SOME": lambda info: "Info: {message}".format(**info),
+                },
+                ReportItem("SOME", {"message": "MESSAGE"}),
+            )
+        )
+
+    def test_append_force_when_needed_and_not_specified(self):
+        self.assertEqual(
+            "message force at the end",
+            build_message_from_report(
+                {"SOME": "message"},
+                ReportItem("SOME", {}),
+                " force at the end",
+            )
+        )
+
+    def test_returns_default_message_when_conflict_key_appear(self):
+        info = {"message": "MESSAGE"}
+        self.assertEqual(
+            "Unknown report: SOME info: {0}".format(str(info)),
+            build_message_from_report(
+                {
+                    "SOME": lambda info: "Info: {message} {extra}".format(
+                        message="ANY", **info
+                    ),
+                },
+                ReportItem("SOME", info),
+            )
+        )
+
+    def test_returns_default_message_when_key_disappear(self):
+        self.assertEqual(
+            "Unknown report: SOME info: {}"
+            ,
+            build_message_from_report(
+                {
+                    "SOME": lambda info: "Info: {message}".format(**info),
+                },
+                ReportItem("SOME", {}),
+            )
+        )
diff --git a/pcs/cli/constraint_all/console_report.py b/pcs/cli/constraint_all/console_report.py
index b216010..2288272 100644
--- a/pcs/cli/constraint_all/console_report.py
+++ b/pcs/cli/constraint_all/console_report.py
@@ -18,6 +18,7 @@ from pcs.cli.constraint_order.console_report import (
 from pcs.cli.constraint_ticket.console_report import (
     constraint_plain as ticket_plain
 )
+from pcs.common import report_codes as codes
 
 
 def constraint(constraint_type, constraint_info, with_id=True):
@@ -42,14 +43,24 @@ def constraint_plain(constraint_type, options_dict, with_id=False):
 
     return type_report_map[constraint_type](options_dict, with_id)
 
-def duplicate_constraints_report(report_item):
-    line_list = []
-    for constraint_info in report_item.info["constraint_info_list"]:
-        line_list.append(
-            constraint(report_item.info["constraint_type"], constraint_info)
-        )
+#Each value (callable taking report_item.info) returns string template.
+#Optionally the template can contain the placeholder {force} for further processing.
+#The placeholder {force} will be appended if it is necessary and not present
+CODE_TO_MESSAGE_BUILDER_MAP = {
+    codes.DUPLICATE_CONSTRAINTS_EXIST: lambda info:
+        "duplicate constraint already exists{force}\n" + "\n".join([
+            "  " + constraint(info["constraint_type"], constraint_info)
+            for constraint_info in info["constraint_info_list"]
+        ])
+    ,
 
-    return (
-        "duplicate constraint already exists{force}\n"
-        + "\n".join(["  " + line for line in line_list])
-    )
+    codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE: lambda info:
+        (
+            "{resource_id} is a {mode} resource, you should use the"
+            " {parent_type} id: {parent_id} when adding constraints"
+        ).format(
+            mode="master/slave" if info["parent_type"] == "master" else "clone",
+            **info
+        )
+    ,
+}
diff --git a/pcs/cli/constraint_all/test/test_console_report.py b/pcs/cli/constraint_all/test/test_console_report.py
index 61be2cc..d686ef6 100644
--- a/pcs/cli/constraint_all/test/test_console_report.py
+++ b/pcs/cli/constraint_all/test/test_console_report.py
@@ -8,6 +8,7 @@ from __future__ import (
 from pcs.test.tools.pcs_unittest import TestCase
 from pcs.test.tools.pcs_unittest import mock
 from pcs.cli.constraint_all import console_report
+from pcs.common import report_codes as codes
 
 class ConstraintTest(TestCase):
     @mock.patch("pcs.cli.constraint_all.console_report.constraint_plain")
@@ -60,13 +61,13 @@ class ConstraintPlainTest(TestCase):
         )
 
 class DuplicateConstraintsReportTest(TestCase):
+    def setUp(self):
+        self.build = console_report.CODE_TO_MESSAGE_BUILDER_MAP[
+            codes.DUPLICATE_CONSTRAINTS_EXIST
+        ]
+
     @mock.patch("pcs.cli.constraint_all.console_report.constraint")
-    def test_translate_from_report_item(self, mock_constraint):
-        report_item = mock.MagicMock()
-        report_item.info = {
-            "constraint_info_list": [{"options": {"a": "b"}}],
-            "constraint_type": "rsc_some"
-        }
+    def test_translate_from_report_info(self, mock_constraint):
         mock_constraint.return_value = "constraint info"
 
         self.assertEqual(
@@ -74,6 +75,38 @@ class DuplicateConstraintsReportTest(TestCase):
                 "duplicate constraint already exists{force}",
                 "  constraint info"
             ]),
-            console_report.duplicate_constraints_report(report_item)
+            self.build({
+                "constraint_info_list": [{"options": {"a": "b"}}],
+                "constraint_type": "rsc_some"
+            })
+        )
 
+class ResourceForConstraintIsMultiinstanceTest(TestCase):
+    def setUp(self):
+        self.build = console_report.CODE_TO_MESSAGE_BUILDER_MAP[
+            codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE
+        ]
+
+    def test_build_message_for_master(self):
+        self.assertEqual(
+            "RESOURCE_PRIMITIVE is a master/slave resource, you should use the"
+                " master id: RESOURCE_MASTER when adding constraints"
+            ,
+            self.build({
+                "resource_id": "RESOURCE_PRIMITIVE",
+                "parent_type": "master",
+                "parent_id": "RESOURCE_MASTER"
+            })
+        )
+
+    def test_build_message_for_clone(self):
+        self.assertEqual(
+            "RESOURCE_PRIMITIVE is a clone resource, you should use the"
+                " clone id: RESOURCE_CLONE when adding constraints"
+            ,
+            self.build({
+                "resource_id": "RESOURCE_PRIMITIVE",
+                "parent_type": "clone",
+                "parent_id": "RESOURCE_CLONE"
+            })
         )
diff --git a/pcs/cluster.py b/pcs/cluster.py
index 68c20f4..0a5918e 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -36,7 +36,7 @@ from pcs import (
 )
 from pcs.utils import parallel_for_nodes
 from pcs.common import report_codes
-from pcs.cli.common.reports import process_library_reports
+from pcs.cli.common.reports import process_library_reports, build_report_message
 from pcs.lib import (
     pacemaker as lib_pacemaker,
     sbd as lib_sbd,
@@ -56,7 +56,6 @@ from pcs.lib.errors import (
 )
 from pcs.lib.external import (
     disable_service,
-    is_cman_cluster,
     is_systemctl,
     NodeCommunicationException,
     node_communicator_exception_to_report_item,
@@ -881,14 +880,14 @@ def start_cluster(argv):
 
     print("Starting Cluster...")
     service_list = []
-    if utils.is_rhel6():
+    if utils.is_cman_cluster():
 #   Verify that CMAN_QUORUM_TIMEOUT is set, if not, then we set it to 0
         retval, output = getstatusoutput('source /etc/sysconfig/cman ; [ -z "$CMAN_QUORUM_TIMEOUT" ]')
         if retval == 0:
             with open("/etc/sysconfig/cman", "a") as cman_conf_file:
                 cman_conf_file.write("\nCMAN_QUORUM_TIMEOUT=0\n")
 
-        output, retval = utils.run(["service", "cman","start"])
+        output, retval = utils.start_service("cman")
         if retval != 0:
             print(output)
             utils.err("unable to start cman")
@@ -898,7 +897,7 @@ def start_cluster(argv):
             service_list.append("corosync-qdevice")
     service_list.append("pacemaker")
     for service in service_list:
-        output, retval = utils.run(["service", service, "start"])
+        output, retval = utils.start_service(service)
         if retval != 0:
             print(output)
             utils.err("unable to start {0}".format(service))
@@ -919,9 +918,11 @@ def start_cluster_all():
         wait_for_nodes_started(all_nodes, wait_timeout)
 
 def start_cluster_nodes(nodes):
-    error_list = parallel_for_nodes(utils.startCluster, nodes, quiet=True)
-    if error_list:
-        utils.err("unable to start all nodes\n" + "\n".join(error_list))
+    node_errors = parallel_for_nodes(utils.startCluster, nodes, quiet=True)
+    if node_errors:
+        utils.err(
+            "unable to start all nodes\n" + "\n".join(node_errors.values())
+        )
 
 def is_node_fully_started(node_status):
     return (
@@ -943,7 +944,7 @@ def wait_for_local_node_started(stop_at, interval):
             time.sleep(interval)
     except LibraryError as e:
         return 1, "Unable to get node status: {0}".format(
-            "\n".join([item.message for item in e.args])
+            "\n".join([build_report_message(item) for item in e.args])
         )
 
 def wait_for_remote_node_started(node, stop_at, interval):
@@ -977,10 +978,10 @@ def wait_for_nodes_started(node_list, timeout=None):
         else:
             print(output)
     else:
-        error_list = parallel_for_nodes(
+        node_errors = parallel_for_nodes(
             wait_for_remote_node_started, node_list, stop_at, interval
         )
-        if error_list:
+        if node_errors:
             utils.err("unable to verify all nodes have started")
 
 def stop_cluster_all():
@@ -1034,13 +1035,32 @@ def stop_cluster_nodes(nodes):
                 + "\n".join(error_list)
             )
 
-    error_list = parallel_for_nodes(utils.stopPacemaker, nodes, quiet=True)
-    if error_list:
-        utils.err("unable to stop all nodes\n" + "\n".join(error_list))
+    was_error = False
+    node_errors = parallel_for_nodes(utils.stopPacemaker, nodes, quiet=True)
+    accessible_nodes = [
+        node for node in nodes if node not in node_errors.keys()
+    ]
+    if node_errors:
+        utils.err(
+            "unable to stop all nodes\n" + "\n".join(node_errors.values()),
+            exit_after_error=not accessible_nodes
+        )
+        was_error = True
+
+    for node in node_errors.keys():
+        print("{0}: Not stopping cluster - node is unreachable".format(node))
 
-    error_list = parallel_for_nodes(utils.stopCorosync, nodes, quiet=True)
-    if error_list:
-        utils.err("unable to stop all nodes\n" + "\n".join(error_list))
+    node_errors = parallel_for_nodes(
+        utils.stopCorosync,
+        accessible_nodes,
+        quiet=True
+    )
+    if node_errors:
+        utils.err(
+            "unable to stop all nodes\n" + "\n".join(node_errors.values())
+        )
+    if was_error:
+        utils.err("unable to stop all nodes")
 
 def enable_cluster(argv):
     if len(argv) > 0:
@@ -1082,19 +1102,22 @@ def destroy_cluster(argv, keep_going=False):
     if len(argv) > 0:
         # stop pacemaker and resources while cluster is still quorate
         nodes = argv
-        error_list = parallel_for_nodes(utils.stopPacemaker, nodes, quiet=True)
+        node_errors = parallel_for_nodes(utils.stopPacemaker, nodes, quiet=True)
         # proceed with destroy regardless of errors
         # destroy will stop any remaining cluster daemons
-        error_list = parallel_for_nodes(utils.destroyCluster, nodes, quiet=True)
-        if error_list:
+        node_errors = parallel_for_nodes(utils.destroyCluster, nodes, quiet=True)
+        if node_errors:
             if keep_going:
                 print(
                     "Warning: unable to destroy cluster\n"
                     +
-                    "\n".join(error_list)
+                    "\n".join(node_errors.values())
                 )
             else:
-                utils.err("unable to destroy cluster\n" + "\n".join(error_list))
+                utils.err(
+                    "unable to destroy cluster\n"
+                    + "\n".join(node_errors.values())
+                )
 
 def stop_cluster(argv):
     if len(argv) > 0:
@@ -1143,16 +1166,17 @@ def stop_cluster(argv):
 
 def stop_cluster_pacemaker():
     print("Stopping Cluster (pacemaker)...")
-    command = ["service", "pacemaker", "stop"]
-    if not is_systemctl() and is_cman_cluster(utils.cmd_runner()):
+    if not is_systemctl():
+        command = ["service", "pacemaker", "stop"]
         # If --skip-cman is not specified, pacemaker init script will stop cman
         # and corosync as well. That way some of the nodes may stop cman before
         # others stop pacemaker, which leads to quorum loss. We need to keep
         # quorum until all pacemaker resources are stopped as some of them may
         # need quorum to be able to stop.
-        # Additional parameters are not supported if "service" command is
-        # redirected to systemd.
-        command.append("--skip-cman")
+        if utils.is_cman_cluster():
+            command.append("--skip-cman")
+    else:
+        command = ["systemctl", "stop", "pacemaker"]
     output, retval = utils.run(command)
     if retval != 0:
         print(output)
@@ -1161,7 +1185,7 @@ def stop_cluster_pacemaker():
 def stop_cluster_corosync():
     if utils.is_rhel6():
         print("Stopping Cluster (cman)...")
-        output, retval = utils.run(["service", "cman","stop"])
+        output, retval = utils.stop_service("cman")
         if retval != 0:
             print(output)
             utils.err("unable to stop cman")
@@ -1172,7 +1196,7 @@ def stop_cluster_corosync():
             service_list.append("corosync-qdevice")
         service_list.append("corosync")
         for service in service_list:
-            output, retval = utils.run(["service", service, "stop"])
+            output, retval = utils.stop_service(service)
             if retval != 0:
                 print(output)
                 utils.err("unable to stop {0}".format(service))
@@ -1354,21 +1378,22 @@ def _ensure_cluster_is_offline_if_atb_should_be_enabled(
         cluster when determining whenever ATB is needed.
     skip_offline_nodes -- if True offline nodes will be skipped
     """
-    corosync_conf = lib_env.get_corosync_conf()
-    if lib_sbd.atb_has_to_be_enabled(
-        lib_env.cmd_runner(), corosync_conf, node_num_modifier
-    ):
-        print(
-            "Warning: auto_tie_breaker quorum option will be enabled to make "
-            "SBD fencing effecive after this change. Cluster has to be offline "
-            "to be able to make this change."
-        )
-        check_corosync_offline_on_nodes(
-            lib_env.node_communicator(),
-            lib_env.report_processor,
-            corosync_conf.get_nodes(),
-            skip_offline_nodes
-        )
+    if not lib_env.is_cman_cluster:
+        corosync_conf = lib_env.get_corosync_conf()
+        if lib_sbd.atb_has_to_be_enabled(
+            lib_env.cmd_runner(), corosync_conf, node_num_modifier
+        ):
+            print(
+                "Warning: auto_tie_breaker quorum option will be enabled to "
+                "make SBD fencing effective after this change. Cluster has to "
+                "be offline to be able to make this change."
+            )
+            check_corosync_offline_on_nodes(
+                lib_env.node_communicator(),
+                lib_env.report_processor,
+                corosync_conf.get_nodes(),
+                skip_offline_nodes
+            )
 
 
 def cluster_node(argv):
@@ -1842,11 +1867,10 @@ def cluster_destroy(argv):
         destroy_cluster(utils.getNodesFromCorosyncConf())
     else:
         print("Shutting down pacemaker/corosync services...")
-        os.system("service pacemaker stop")
-        # returns error if qdevice is not running, it is safe to ignore it
-        # since we want it not to be running
-        os.system("service corosync-qdevice stop")
-        os.system("service corosync stop")
+        for service in ["pacemaker", "corosync-qdevice", "corosync"]:
+            # Returns an error if a service is not running. It is safe to
+            # ignore it since we want it not to be running anyway.
+            utils.stop_service(service)
         print("Killing any remaining services...")
         os.system("killall -q -9 corosync corosync-qdevice aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld")
         try:
@@ -1856,7 +1880,7 @@ def cluster_destroy(argv):
             # for now
             pass
         try:
-            disable_service(utils.cmd_runner(), "sbd")
+            disable_service(utils.cmd_runner(), lib_sbd.get_sbd_service_name())
         except:
             # it's not a big deal if sbd disable fails
             pass
diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
index 9b05951..f5968fa 100644
--- a/pcs/common/report_codes.py
+++ b/pcs/common/report_codes.py
@@ -18,13 +18,12 @@ FORCE_METADATA_ISSUE = "METADATA_ISSUE"
 FORCE_OPTIONS = "OPTIONS"
 FORCE_QDEVICE_MODEL = "QDEVICE_MODEL"
 FORCE_QDEVICE_USED = "QDEVICE_USED"
-FORCE_UNKNOWN_AGENT = "UNKNOWN_AGENT"
-FORCE_UNSUPPORTED_AGENT = "UNSUPPORTED_AGENT"
 SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES"
 SKIP_UNREADABLE_CONFIG = "SKIP_UNREADABLE_CONFIG"
 
-AGENT_GENERAL_ERROR = "AGENT_GENERAL_ERROR"
-AGENT_NOT_FOUND = "AGENT_NOT_FOUND"
+AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE = "AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE"
+AGENT_NAME_GUESS_FOUND_NONE = "AGENT_NAME_GUESS_FOUND_NONE"
+AGENT_NAME_GUESSED = "AGENT_NAME_GUESSED"
 BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT'
 BOOTH_ADDRESS_DUPLICATION = "BOOTH_ADDRESS_DUPLICATION"
 BOOTH_ALREADY_IN_CIB = "BOOTH_ALREADY_IN_CIB"
@@ -54,6 +53,9 @@ BOOTH_TICKET_NAME_INVALID = "BOOTH_TICKET_NAME_INVALID"
 BOOTH_TICKET_OPERATION_FAILED = "BOOTH_TICKET_OPERATION_FAILED"
 BOOTH_TICKET_STATUS_ERROR = "BOOTH_TICKET_STATUS_ERROR"
 BOOTH_UNSUPORTED_FILE_LOCATION = "BOOTH_UNSUPORTED_FILE_LOCATION"
+CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET = "CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET"
+CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET = "CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET"
+CIB_ACL_TARGET_ALREADY_EXISTS = "CIB_ACL_TARGET_ALREADY_EXISTS"
 CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND"
 CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS"
 CIB_ALERT_RECIPIENT_VALUE_INVALID = "CIB_ALERT_RECIPIENT_VALUE_INVALID"
@@ -65,6 +67,8 @@ CIB_PUSH_ERROR = "CIB_PUSH_ERROR"
 CIB_UPGRADE_FAILED = "CIB_UPGRADE_FAILED"
 CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION = "CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION"
 CIB_UPGRADE_SUCCESSFUL = "CIB_UPGRADE_SUCCESSFUL"
+CLUSTER_CONF_LOAD_ERROR_INVALID_FORMAT = "CLUSTER_CONF_LOAD_ERROR_INVALID_FORMAT"
+CLUSTER_CONF_READ_ERROR = "CLUSTER_CONF_READ_ERROR"
 CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES = "CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES"
 CMAN_BROADCAST_ALL_RINGS = 'CMAN_BROADCAST_ALL_RINGS'
 CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED'
@@ -88,6 +92,7 @@ COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE"
 CRM_MON_ERROR = "CRM_MON_ERROR"
 DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST"
 EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST"
+EMPTY_ID = "EMPTY_ID"
 FILE_ALREADY_EXISTS = "FILE_ALREADY_EXISTS"
 FILE_DOES_NOT_EXIST = "FILE_DOES_NOT_EXIST"
 FILE_IO_ERROR = "FILE_IO_ERROR"
@@ -95,10 +100,10 @@ ID_ALREADY_EXISTS = 'ID_ALREADY_EXISTS'
 ID_NOT_FOUND = 'ID_NOT_FOUND'
 IGNORED_CMAN_UNSUPPORTED_OPTION = 'IGNORED_CMAN_UNSUPPORTED_OPTION'
 INVALID_ID = "INVALID_ID"
-INVALID_METADATA_FORMAT = 'INVALID_METADATA_FORMAT'
 INVALID_OPTION = "INVALID_OPTION"
 INVALID_OPTION_VALUE = "INVALID_OPTION_VALUE"
 INVALID_RESOURCE_NAME = 'INVALID_RESOURCE_NAME'
+INVALID_RESOURCE_AGENT_NAME = 'INVALID_RESOURCE_AGENT_NAME'
 INVALID_RESPONSE_FORMAT = "INVALID_RESPONSE_FORMAT"
 INVALID_SCORE = "INVALID_SCORE"
 INVALID_TIMEOUT_VALUE = "INVALID_TIMEOUT_VALUE"
@@ -182,7 +187,6 @@ UNABLE_TO_READ_COROSYNC_CONFIG = "UNABLE_TO_READ_COROSYNC_CONFIG"
 UNABLE_TO_GET_SBD_CONFIG = "UNABLE_TO_GET_SBD_CONFIG"
 UNABLE_TO_GET_SBD_STATUS = "UNABLE_TO_GET_SBD_STATUS"
 UNKNOWN_COMMAND = 'UNKNOWN_COMMAND'
-UNSUPPORTED_AGENT = 'UNSUPPORTED_AGENT'
 WATCHDOG_INVALID = "WATCHDOG_INVALID"
 UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS = "UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS"
 WATCHDOG_NOT_FOUND = "WATCHDOG_NOT_FOUND"
diff --git a/pcs/lib/booth/reports.py b/pcs/lib/booth/reports.py
index 6aa9d3d..9bcb7ca 100644
--- a/pcs/lib/booth/reports.py
+++ b/pcs/lib/booth/reports.py
@@ -16,12 +16,8 @@ def booth_lack_of_sites(site_list):
     """
     return ReportItem.error(
         report_codes.BOOTH_LACK_OF_SITES,
-        "lack of sites for booth configuration (need 2 at least):"
-            " sites {sites_string}"
-        ,
         info={
             "sites": site_list,
-            "sites_string": ", ".join(site_list) if site_list else "missing",
         }
     )
 
@@ -32,7 +28,6 @@ def booth_even_peers_num(number):
     """
     return ReportItem.error(
         report_codes.BOOTH_EVEN_PEERS_NUM,
-        "odd number of peers is required (entered {number} peers)",
         info={
             "number": number,
         }
@@ -45,11 +40,8 @@ def booth_address_duplication(duplicate_addresses):
     """
     return ReportItem.error(
         report_codes.BOOTH_ADDRESS_DUPLICATION,
-        "duplicate address for booth configuration: {addresses_string}"
-        ,
         info={
             "addresses": duplicate_addresses,
-            "addresses_string": ", ".join(duplicate_addresses),
         }
     )
 
@@ -61,10 +53,8 @@ def booth_config_unexpected_lines(line_list):
     """
     return ReportItem.error(
         report_codes.BOOTH_CONFIG_UNEXPECTED_LINES,
-        "unexpected line appeard in config: \n{lines_string}",
         info={
             "line_list": line_list,
-            "lines_string": "\n".join(line_list)
         }
     )
 
@@ -76,8 +66,6 @@ def booth_invalid_name(name, reason):
     """
     return ReportItem.error(
         report_codes.BOOTH_INVALID_NAME,
-            "booth name '{name}' is not valid ({reason})"
-        ,
         info={
             "name": name,
             "reason": reason,
@@ -92,9 +80,6 @@ def booth_ticket_name_invalid(ticket_name):
     """
     return ReportItem.error(
         report_codes.BOOTH_TICKET_NAME_INVALID,
-        "booth ticket name '{ticket_name}' is not valid,"
-            " use alphanumeric chars or dash"
-        ,
         info={
             "ticket_name": ticket_name,
         }
@@ -108,7 +93,6 @@ def booth_ticket_duplicate(ticket_name):
     """
     return ReportItem.error(
         report_codes.BOOTH_TICKET_DUPLICATE,
-        "booth ticket name '{ticket_name}' already exists in configuration",
         info={
             "ticket_name": ticket_name,
         }
@@ -122,7 +106,6 @@ def booth_ticket_does_not_exist(ticket_name):
     """
     return ReportItem.error(
         report_codes.BOOTH_TICKET_DOES_NOT_EXIST,
-        "booth ticket name '{ticket_name}' does not exist",
         info={
             "ticket_name": ticket_name,
         }
@@ -136,7 +119,6 @@ def booth_already_in_cib(name):
     """
     return ReportItem.error(
         report_codes.BOOTH_ALREADY_IN_CIB,
-        "booth instance '{name}' is already created as cluster resource",
         info={
             "name": name,
         }
@@ -149,7 +131,6 @@ def booth_not_exists_in_cib(name):
     """
     return ReportItem.error(
         report_codes.BOOTH_NOT_EXISTS_IN_CIB,
-        "booth instance '{name}' not found in cib",
         info={
             "name": name,
         }
@@ -164,7 +145,6 @@ def booth_config_is_used(name, detail=""):
     """
     return ReportItem.error(
         report_codes.BOOTH_CONFIG_IS_USED,
-        "booth instance '{name}' is used{detail_string}",
         info={
             "name": name,
             "detail": detail,
@@ -188,7 +168,6 @@ def booth_multiple_times_in_cib(
     return ReportItem(
         report_codes.BOOTH_MULTIPLE_TIMES_IN_CIB,
         severity,
-        "found more than one booth instance '{name}' in cib",
         info={
             "name": name,
         },
@@ -203,7 +182,6 @@ def booth_config_distribution_started():
     """
     return ReportItem.info(
         report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
-        "Sending booth configuration to cluster nodes..."
     )
 
 
@@ -214,21 +192,10 @@ def booth_config_accepted_by_node(node=None, name_list=None):
     node -- name of node
     name_list -- list of names of booth instance
     """
-    if name_list:
-        name = ", ".join(name_list)
-        if name == "booth":
-            msg = "Booth config saved."
-        else:
-            msg = "Booth config(s) ({name}) saved."
-    else:
-        msg = "Booth config saved."
-        name = None
     return ReportItem.info(
         report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
-        msg if node is None else "{node}: " + msg,
         info={
             "node": node,
-            "name": name,
             "name_list": name_list
         }
     )
@@ -242,13 +209,8 @@ def booth_config_distribution_node_error(node, reason, name=None):
     reason -- reason of failure
     name -- name of booth instance
     """
-    if name and name != "booth":
-        msg = "Unable to save booth config ({name}) on node '{node}': {reason}"
-    else:
-        msg = "Unable to save booth config on node '{node}': {reason}"
     return ReportItem.error(
         report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR,
-        msg,
         info={
             "node": node,
             "name": name,
@@ -267,14 +229,9 @@ def booth_config_read_error(
     severity -- severity of report item
     forceable -- is this report item forceable? by what category?
     """
-    if name and name != "booth":
-        msg = "Unable to read booth config ({name})."
-    else:
-        msg = "Unable to read booth config."
     return ReportItem(
         report_codes.BOOTH_CONFIG_READ_ERROR,
         severity,
-        msg,
         info={"name": name},
         forceable=forceable
     )
@@ -287,13 +244,8 @@ def booth_fetching_config_from_node_started(node, config=None):
     node -- node from which config is fetching
     config -- config name
     """
-    if config or config == 'booth':
-        msg = "Fetching booth config from node '{node}'..."
-    else:
-        msg = "Fetching booth config '{config}' from node '{node}'..."
     return ReportItem.info(
         report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
-        msg,
         info={
             "node": node,
             "config": config,
@@ -310,7 +262,6 @@ def booth_unsupported_file_location(file):
     """
     return ReportItem.warning(
         report_codes.BOOTH_UNSUPORTED_FILE_LOCATION,
-        "skipping file {file}: unsupported file location",
         info={"file": file}
     )
 
@@ -323,7 +274,6 @@ def booth_daemon_status_error(reason):
     """
     return ReportItem.error(
         report_codes.BOOTH_DAEMON_STATUS_ERROR,
-        "unable to get status of booth daemon: {reason}",
         info={"reason": reason}
     )
 
@@ -336,7 +286,6 @@ def booth_tickets_status_error(reason=None):
     """
     return ReportItem.error(
         report_codes.BOOTH_TICKET_STATUS_ERROR,
-        "unable to get status of booth tickets",
         info={
             "reason": reason,
         }
@@ -351,7 +300,6 @@ def booth_peers_status_error(reason=None):
     """
     return ReportItem.error(
         report_codes.BOOTH_PEERS_STATUS_ERROR,
-        "unable to get status of booth peers",
         info={
             "reason": reason,
         }
@@ -365,7 +313,6 @@ def booth_cannot_determine_local_site_ip():
     """
     return ReportItem.error(
         report_codes.BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP,
-        "cannot determine local site ip, please specify site parameter",
         info={}
     )
 
@@ -380,9 +327,6 @@ def booth_ticket_operation_failed(operation, reason, site_ip, ticket_name):
     """
     return ReportItem.error(
         report_codes.BOOTH_TICKET_OPERATION_FAILED,
-        "unable to {operation} booth ticket '{ticket_name}' for site '{site_ip}', "
-            "reason: {reason}"
-        ,
         info={
             "operation": operation,
             "reason": reason,
@@ -400,7 +344,6 @@ def booth_skipping_config(config_file, reason):
     """
     return ReportItem.warning(
         report_codes.BOOTH_SKIPPING_CONFIG,
-        "Skipping config file '{config_file}': {reason}",
         info={
             "config_file": config_file,
             "reason": reason,
@@ -411,7 +354,6 @@ def booth_cannot_identify_keyfile(severity=ReportItemSeverity.ERROR):
     return ReportItem(
         report_codes.BOOTH_CANNOT_IDENTIFY_KEYFILE,
         severity,
-        "cannot identify authfile in booth configuration",
         info={},
         forceable=report_codes.FORCE_BOOTH_DESTROY
             if severity == ReportItemSeverity.ERROR else None
diff --git a/pcs/lib/booth/test/test_sync.py b/pcs/lib/booth/test/test_sync.py
index 701b086..447f894 100644
--- a/pcs/lib/booth/test/test_sync.py
+++ b/pcs/lib/booth/test/test_sync.py
@@ -77,7 +77,6 @@ class SetConfigOnNodeTest(TestCase):
                 report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                 {
                     "node": self.node.label,
-                    "name": "cfg_name",
                     "name_list": ["cfg_name"]
                 }
             )]
@@ -107,7 +106,6 @@ class SetConfigOnNodeTest(TestCase):
                 report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                 {
                     "node": self.node.label,
-                    "name": "cfg_name",
                     "name_list": ["cfg_name"]
                 }
             )]
@@ -383,7 +381,6 @@ class SendAllConfigToNodeTest(TestCase):
                     report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                     {
                         "node": self.node.label,
-                        "name": "name1.conf, file1.key, name2.conf, file2.key",
                         "name_list": [
                             "name1.conf", "file1.key", "name2.conf", "file2.key"
                         ]
@@ -619,7 +616,6 @@ class SendAllConfigToNodeTest(TestCase):
                     report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                     {
                         "node": self.node.label,
-                        "name": "name2.conf, file2.key",
                         "name_list": ["name2.conf", "file2.key"]
                     }
                 )
@@ -1066,7 +1062,6 @@ class SendAllConfigToNodeTest(TestCase):
                     report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                     {
                         "node": self.node.label,
-                        "name": "name1.conf, name2.conf, file2.key",
                         "name_list": ["name1.conf", "name2.conf", "file2.key"]
                     }
                 )
@@ -1158,7 +1153,6 @@ class SendAllConfigToNodeTest(TestCase):
                     report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                     {
                         "node": self.node.label,
-                        "name": "name2.conf, file2.key",
                         "name_list": ["name2.conf", "file2.key"]
                     }
                 )
diff --git a/pcs/lib/cib/acl.py b/pcs/lib/cib/acl.py
index b4bc279..7f91cba 100644
--- a/pcs/lib/cib/acl.py
+++ b/pcs/lib/cib/acl.py
@@ -10,16 +10,45 @@ from lxml import etree
 from pcs.lib import reports
 from pcs.lib.errors import LibraryError
 from pcs.lib.cib.tools import (
+    etree_element_attibutes_to_dict,
     check_new_id_applicable,
     does_id_exist,
     find_unique_id,
     get_acls,
 )
 
-class AclRoleNotFound(LibraryError):
+
+class AclError(Exception):
     pass
 
-def __validate_permissions(tree, permission_info_list):
+
+class AclRoleNotFound(AclError):
+    # pylint: disable=super-init-not-called
+    def __init__(self, role_id):
+        self.role_id = role_id
+
+
+class AclTargetNotFound(AclError):
+    # pylint: disable=super-init-not-called
+    def __init__(self, target_id):
+        self.target_id = target_id
+
+
+class AclGroupNotFound(AclError):
+    # pylint: disable=super-init-not-called
+    def __init__(self, group_id):
+        self.group_id = group_id
+
+
+def validate_permissions(tree, permission_info_list):
+    """
+    Validate given permission list.
+    Raise LibraryError if any of permission is not valid.
+
+    tree -- cib tree
+    permission_info_list -- list of tuples like this:
+        ("read|write|deny", "xpath|id", <id-or-xpath-string>)
+    """
     report_items = []
     allowed_permissions = ["read", "write", "deny"]
     allowed_scopes = ["xpath", "id"]
@@ -44,14 +73,40 @@ def __validate_permissions(tree, permission_info_list):
     if report_items:
         raise LibraryError(*report_items)
 
-def __find_role(tree, role_id):
+
+def find_role(tree, role_id):
+    """
+    Returns acl_role element with specified role_id in given tree.
+    Raise AclRoleNotFound if role doesn't exist.
+
+    tree -- etree node
+    role_id -- id of role
+    """
     role = tree.find('.//acl_role[@id="{0}"]'.format(role_id))
     if role is not None:
         return role
-    raise AclRoleNotFound(reports.id_not_found(role_id, "role"))
+    raise AclRoleNotFound(role_id)
 
-def create_role(tree, role_id, description=""):
+
+def _find_permission(tree, permission_id):
+    """
+    Returns acl_permission element with specified id.
+    Raises LibraryError if that permission doesn't exist.
+
+    tree -- etree node
+    permission_id -- id of permission element
     """
+    permission = tree.find(".//acl_permission[@id='{0}']".format(permission_id))
+    if permission is not None:
+        return permission
+    raise LibraryError(reports.id_not_found(permission_id, "permission"))
+
+
+def create_role(tree, role_id, description=None):
+    """
+    Create new role element and add it to cib.
+    Returns newly created role element.
+
     role_id id of desired role
     description role description
     """
@@ -59,46 +114,320 @@ def create_role(tree, role_id, description=""):
     role = etree.SubElement(get_acls(tree), "acl_role", id=role_id)
     if description:
         role.set("description", description)
+    return role
+
+
+def remove_role(tree, role_id, autodelete_users_groups=False):
+    """
+    Remove role with specified id from CIB and all references to it.
+
+    tree -- etree node
+    role_id -- id of role to be removed
+    autodelete_users_groups -- if True remove targets with no role after removing
+    """
+    acl_role = find_role(tree, role_id)
+    acl_role.getparent().remove(acl_role)
+    for role_el in tree.findall(".//role[@id='{0}']".format(role_id)):
+        role_parent = role_el.getparent()
+        role_parent.remove(role_el)
+        if autodelete_users_groups and role_parent.find(".//role") is None:
+            role_parent.getparent().remove(role_parent)
+
+
+def assign_role(target_el, role_el):
+    """
+    Assign role element to specified target/group element.
+    Raise LibraryError if role is already assigned to target/group.
+
+    target_el -- etree element of target/group to which role should be assigned
+    role_el -- etree element of role
+    """
+    assigned_role = target_el.find(
+        "./role[@id='{0}']".format(role_el.get("id"))
+    )
+    if assigned_role is not None:
+        raise LibraryError(reports.acl_role_is_already_assigned_to_target(
+            role_el.get("id"), target_el.get("id")
+        ))
+    etree.SubElement(target_el, "role", {"id": role_el.get("id")})
+
+
+def unassign_role(target_el, role_id, autodelete_target=False):
+    """
+    Unassign role with role_id from specified target/user target_el.
+    Raise LibraryError if role is not assigned to target/group.
+
+    target_el -- etree element of target/group from which role should be
+        unassigned
+    role_id -- id of role
+    autodelete_target -- if True remove target_el if there is no role assigned
+    """
+    assigned_role = target_el.find("./role[@id='{0}']".format(role_id))
+    if assigned_role is None:
+        raise LibraryError(reports.acl_role_is_not_assigned_to_target(
+            role_id, target_el.get("id")
+        ))
+    target_el.remove(assigned_role)
+    if autodelete_target and target_el.find("./role") is None:
+        target_el.getparent().remove(target_el)
+
+
+def find_target(tree, target_id):
+    """
+    Return acl_target etree element with specified id.
+    Raise AclTargetNotFound if target with specified id doesn't exist.
+
+    tree -- etree node
+    target_id -- id of target to find
+    """
+    role = get_acls(tree).find('./acl_target[@id="{0}"]'.format(target_id))
+    if role is None:
+        raise AclTargetNotFound(target_id)
+    return role
+
+
+def find_group(tree, group_id):
+    """
+    Returns acl_group etree element with specified id.
+    Raise AclGroupNotFound if group with group_id doesn't exist.
+
+    tree -- etree node
+    group_id -- id of group to find
+    """
+    role = get_acls(tree).find('./acl_group[@id="{0}"]'.format(group_id))
+    if role is None:
+        raise AclGroupNotFound(group_id)
+    return role
+
 
 def provide_role(tree, role_id):
     """
+    Returns role with id role_id. If doesn't exist, it will be created.
     role_id id of desired role
-    description role description
     """
     try:
-        __find_role(tree, role_id)
+        return find_role(tree, role_id)
     except AclRoleNotFound:
-        create_role(tree, role_id)
+        return create_role(tree, role_id)
+
 
-def add_permissions_to_role(tree, role_id, permission_info_list):
+def create_target(tree, target_id):
     """
-    tree etree node
-    role_id value of atribute id, which exists in dom
-    permission_info_list list of tuples,
-        each contains (permission, scope_type, scope)
+    Creates new acl_target element with id target_id.
+    Raises LibraryError if target with specified id already exists.
+
+    tree -- etree node
+    target_id -- id of new target
+    """
+    acl_el = get_acls(tree)
+    # id of element acl_target is not type ID in CIB ACL schema so we don't need
+    # to check if it is unique ID in whole CIB
+    if acl_el.find("./acl_target[@id='{0}']".format(target_id)) is not None:
+        raise LibraryError(reports.acl_target_already_exists(target_id))
+    return etree.SubElement(get_acls(tree), "acl_target", id=target_id)
+
+
+def create_group(tree, group_id):
+    """
+    Creates new acl_group element with specified id.
+    Raises LibraryError if tree contains element with id group_id.
+
+    tree -- etree node
+    group_id -- id of new group
     """
-    __validate_permissions(tree, permission_info_list)
+    check_new_id_applicable(tree, "ACL group", group_id)
+    return etree.SubElement(get_acls(tree), "acl_group", id=group_id)
 
+
+def remove_target(tree, target_id):
+    """
+    Removes acl_target element from tree with specified id.
+    Raises LibraryError if target with id target_id doesn't exist.
+
+    tree -- etree node
+    target_id -- id of target element to remove
+    """
+    try:
+        target = find_target(tree, target_id)
+        target.getparent().remove(target)
+    except AclTargetNotFound:
+        raise LibraryError(reports.id_not_found(target_id, "user"))
+
+
+def remove_group(tree, group_id):
+    """
+    Removes acl_group element from tree with specified id.
+    Raises LibraryError if group with id group_id doesn't exist.
+
+    tree -- etree node
+    group_id -- id of group element to remove
+    """
+    try:
+        group = find_group(tree, group_id)
+        group.getparent().remove(group)
+    except AclGroupNotFound:
+        raise LibraryError(reports.id_not_found(group_id, "group"))
+
+
+def add_permissions_to_role(role_el, permission_info_list):
+    """
+    Add permissions from permission_info_list to role_el.
+
+    role_el -- acl_role element to which permissions should be added
+    permission_info_list -- list of tuples,
+        each contains (permission, scope_type, scope)
+    """
     area_type_attribute_map = {
         'xpath': 'xpath',
         'id': 'reference',
     }
     for permission, scope_type, scope in permission_info_list:
-        perm = etree.SubElement(__find_role(tree, role_id), "acl_permission")
+        perm = etree.SubElement(role_el, "acl_permission")
         perm.set(
             "id",
-            find_unique_id(tree, "{0}-{1}".format(role_id, permission))
+            find_unique_id(
+                role_el,
+                "{0}-{1}".format(role_el.get("id", "role"), permission)
+            )
         )
         perm.set("kind", permission)
         perm.set(area_type_attribute_map[scope_type], scope)
 
+
+def remove_permission(tree, permission_id):
+    """
+    Remove permission with id permission_id from tree.
+
+    tree -- etree node
+    permission_id -- id of permission element to be removed
+    """
+    permission = _find_permission(tree, permission_id)
+    permission.getparent().remove(permission)
+
+
+def get_role_list(tree):
+    """
+    Returns list of all acl_role elements from tree.
+    Format of items of output list:
+        {
+            "id": <role-id>,
+            "description": <role-description>,
+            "permission_list": [<see function _get_all_permission_list>, ...]
+        }
+
+    tree -- etree node
+    """
+    output_list = []
+    for role_el in get_acls(tree).findall("./acl_role"):
+        role = etree_element_attibutes_to_dict(
+            role_el, ["id", "description"]
+        )
+        role["permission_list"] = _get_permission_list(role_el)
+        output_list.append(role)
+    return output_list
+
+
+def _get_permission_list(role_el):
+    """
+    Return list of all permissions of role element role_el.
+    Format of item of output list (if attribute is missing in element under its
+    key there is None):
+        {
+            "id": <id of permission element>,
+            "description": <permission description>,
+            "kind": <read|write|deny>,
+            "xpath": <xpath string>,
+            "reference": <cib element id>,
+            "object-type": <>,
+            "attribute": <>,
+        }
+
+    role_el -- acl_role etree element of which permissions should be returned
+    """
+    output_list = []
+    for permission in role_el.findall("./acl_permission"):
+        output_list.append(etree_element_attibutes_to_dict(
+            permission,
+            [
+                "id", "description", "kind", "xpath", "reference",
+                "object-type", "attribute"
+            ]
+        ))
+    return output_list
+
+
+def get_target_list(tree):
+    """
+    Returns list of acl_target elements in format:
+        {
+            "id": <target id>,
+            "role_list": [<assigned role_id as string>, ...]
+        }
+
+    tree -- etree node
+    """
+    return _get_target_like_list_with_tag(tree, "acl_target")
+
+
+def get_group_list(tree):
+    """
+    Returns list of acl_group elements in format:
+        {
+            "id": <group id>,
+            "role_list": [<assigned role_id as string>, ...]
+        }
+
+    tree -- etree node
+    """
+    return _get_target_like_list_with_tag(tree, "acl_group")
+
+
+def _get_target_like_list_with_tag(tree, tag):
+    output_list = []
+    for target_el in get_acls(tree).findall("./{0}".format(tag)):
+        output_list.append({
+            "id": target_el.get("id"),
+            "role_list": _get_role_list_of_target(target_el),
+        })
+    return output_list
+
+
+def _get_role_list_of_target(target):
+    """
+    Returns all roles assigned to target element as list of strings.
+
+    target -- etree acl_target/acl_group element of which roles should be
+        returned
+    """
+    return [
+        role.get("id") for role in target.findall("./role") if role.get("id")
+    ]
+
+
 def remove_permissions_referencing(tree, reference):
+    """
+    Removes all permissions with specified reference.
+
+    tree -- etree node
+    reference -- reference identifier
+    """
     xpath = './/acl_permission[@reference="{0}"]'.format(reference)
     for permission in tree.findall(xpath):
         permission.getparent().remove(permission)
 
+
 def dom_remove_permissions_referencing(dom, reference):
     # TODO: remove once we go fully lxml
     for permission in dom.getElementsByTagName("acl_permission"):
         if permission.getAttribute("reference") == reference:
             permission.parentNode.removeChild(permission)
+
+
+def acl_error_to_report_item(e):
+    if e.__class__ == AclTargetNotFound:
+        return reports.id_not_found(e.target_id, "user")
+    elif e.__class__ == AclGroupNotFound:
+        return reports.id_not_found(e.group_id, "group")
+    elif e.__class__ == AclRoleNotFound:
+        return reports.id_not_found(e.role_id, "role")
+    raise e
diff --git a/pcs/lib/cib/alert.py b/pcs/lib/cib/alert.py
index b5fe88c..c3a2cd9 100644
--- a/pcs/lib/cib/alert.py
+++ b/pcs/lib/cib/alert.py
@@ -6,11 +6,12 @@ from __future__ import (
 )
 
 from lxml import etree
+from functools import partial
 
 from pcs.common import report_codes
 from pcs.lib import reports
 from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
-from pcs.lib.cib.nvpair import update_nvset, get_nvset
+from pcs.lib.cib.nvpair import arrange_first_nvset, get_nvset
 from pcs.lib.cib.tools import (
     check_new_id_applicable,
     get_sub_element,
@@ -20,28 +21,11 @@ from pcs.lib.cib.tools import (
 )
 
 
-def update_instance_attributes(tree, element, attribute_dict):
-    """
-    Updates instance attributes of element. Returns updated instance
-    attributes element.
-
-    tree -- cib etree node
-    element -- parent element of instance attributes
-    attribute_dict -- dictionary of nvpairs
-    """
-    return update_nvset("instance_attributes", tree, element, attribute_dict)
-
-
-def update_meta_attributes(tree, element, attribute_dict):
-    """
-    Updates meta attributes of element. Returns updated meta attributes element.
-
-    tree -- cib etree node
-    element -- parent element of meta attributes
-    attribute_dict -- dictionary of nvpairs
-    """
-    return update_nvset("meta_attributes", tree, element, attribute_dict)
-
+update_instance_attributes = partial(
+    arrange_first_nvset,
+    "instance_attributes"
+)
+update_meta_attributes = partial(arrange_first_nvset, "meta_attributes")
 
 def _update_optional_attribute(element, attribute, value):
     """
diff --git a/pcs/lib/cib/nvpair.py b/pcs/lib/cib/nvpair.py
index d1a0cae..fad1ffa 100644
--- a/pcs/lib/cib/nvpair.py
+++ b/pcs/lib/cib/nvpair.py
@@ -9,62 +9,68 @@ from lxml import etree
 
 from pcs.lib.cib.tools import (
     get_sub_element,
-    find_unique_id,
+    create_subelement_id,
 )
 
 
-def update_nvpair(tree, element, name, value):
+def set_nvpair_in_nvset(nvset_element, name, value):
     """
     Update nvpair, create new if it doesn't yet exist or remove existing
-    nvpair if value is empty. Returns created/updated/removed nvpair element.
+    nvpair if value is empty.
 
-    tree -- cib etree node
-    element -- element in which nvpair should be added/updated/removed
+    nvset_element -- element in which nvpair should be added/updated/removed
     name -- name of nvpair
     value -- value of nvpair
     """
-    nvpair = element.find("./nvpair[@name='{0}']".format(name))
+    nvpair = nvset_element.find("./nvpair[@name='{0}']".format(name))
     if nvpair is None:
-        if not value:
-            return None
-        nvpair_id = find_unique_id(
-            tree, "{0}-{1}".format(element.get("id"), name)
-        )
-        nvpair = etree.SubElement(
-            element, "nvpair", id=nvpair_id, name=name, value=value
-        )
+        if value:
+            etree.SubElement(
+                nvset_element,
+                "nvpair",
+                id=create_subelement_id(nvset_element, name),
+                name=name,
+                value=value
+            )
     else:
         if value:
             nvpair.set("value", value)
         else:
-            # remove nvpair if value is empty
-            element.remove(nvpair)
-    return nvpair
-
+            nvset_element.remove(nvpair)
 
-def update_nvset(tag_name, tree, element, attribute_dict):
+def arrange_first_nvset(tag_name, context_element, attribute_dict):
     """
+    Ensure that context_element contains an nvset (with tag_name) with nvpairs
+    corresponding to attribute_dict.
+
+    WARNING: does not solve multiple nvset (with tag_name) under
+    context_element! Consider carefully if this is your case. Probably not.
+    There could be more than one nvset.
+    This function is DEPRECATED. Try to use update_nvset etc.
+
     This method updates nvset specified by tag_name. If specified nvset
     doesn't exist it will be created. Returns updated nvset element or None if
     attribute_dict is empty.
 
     tag_name -- tag name of nvset element
-    tree -- cib etree node
-    element -- parent element of nvset
+    context_element -- parent element of nvset
     attribute_dict -- dictionary of nvpairs
     """
     if not attribute_dict:
-        return None
+        return
 
-    attributes = get_sub_element(element, tag_name, find_unique_id(
-        tree, "{0}-{1}".format(element.get("id"), tag_name)
-    ), 0)
+    nvset_element = get_sub_element(
+        context_element,
+        tag_name,
+        create_subelement_id(context_element, tag_name),
+        new_index=0
+    )
 
-    for name, value in sorted(attribute_dict.items()):
-        update_nvpair(tree, attributes, name, value)
-
-    return attributes
+    update_nvset(nvset_element, attribute_dict)
 
+def update_nvset(nvset_element, attribute_dict):
+    for name, value in sorted(attribute_dict.items()):
+        set_nvpair_in_nvset(nvset_element, name, value)
 
 def get_nvset(nvset):
     """
diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py
index c47dd1e..7739f2b 100644
--- a/pcs/lib/cib/test/test_alert.py
+++ b/pcs/lib/cib/test/test_alert.py
@@ -17,44 +17,9 @@ from pcs.test.tools.assertions import(
     assert_xml_equal,
     assert_report_item_list_equal,
 )
-from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.custom_mock import MockLibraryReportProcessor
 
 
- at mock.patch("pcs.lib.cib.alert.update_nvset")
-class UpdateInstanceAttributesTest(TestCase):
-    def test_success(self, mock_update_nvset):
-        ret_val = etree.Element("nvset")
-        tree = etree.Element("tree")
-        element = etree.Element("element")
-        attributes = {"a": 1}
-        mock_update_nvset.return_value = ret_val
-        self.assertEqual(
-            alert.update_instance_attributes(tree, element, attributes),
-            ret_val
-        )
-        mock_update_nvset.assert_called_once_with(
-            "instance_attributes", tree, element, attributes
-        )
-
-
- at mock.patch("pcs.lib.cib.alert.update_nvset")
-class UpdateMetaAttributesTest(TestCase):
-    def test_success(self, mock_update_nvset):
-        ret_val = etree.Element("nvset")
-        tree = etree.Element("tree")
-        element = etree.Element("element")
-        attributes = {"a": 1}
-        mock_update_nvset.return_value = ret_val
-        self.assertEqual(
-            alert.update_meta_attributes(tree, element, attributes),
-            ret_val
-        )
-        mock_update_nvset.assert_called_once_with(
-            "meta_attributes", tree, element, attributes
-        )
-
-
 class UpdateOptionalAttributeTest(TestCase):
     def test_add(self):
         element = etree.Element("element")
@@ -368,7 +333,7 @@ class CreateAlertTest(TestCase):
                     "id": "1alert",
                     "id_description": "alert-id",
                     "invalid_character": "1",
-                    "reason": "invalid first character"
+                    "is_first_char": True,
                 }
             )
         )
diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py
index 56ba4d1..0d18a7e 100644
--- a/pcs/lib/cib/test/test_nvpair.py
+++ b/pcs/lib/cib/test/test_nvpair.py
@@ -5,15 +5,54 @@ from __future__ import (
     unicode_literals,
 )
 
-from pcs.test.tools.pcs_unittest import TestCase
-
 from lxml import etree
 
 from pcs.lib.cib import nvpair
 from pcs.test.tools.assertions import assert_xml_equal
+from pcs.test.tools.pcs_unittest import TestCase, mock
 
 
-class UpdateNvpairTest(TestCase):
+class UpdateNvsetTest(TestCase):
+    @mock.patch(
+        "pcs.lib.cib.nvpair.create_subelement_id",
+        mock.Mock(return_value="4")
+    )
+    def test_updates_nvset(self):
+        nvset_element = etree.fromstring("""
+            <instance_attributes id="iattrs">
+                <nvpair id="1" name="a" value="b"/>
+                <nvpair id="2" name="c" value="d"/>
+                <nvpair id="3" name="e" value="f"/>
+            </instance_attributes>
+        """)
+        nvpair.update_nvset(nvset_element, {
+            "a": "B",
+            "c": "",
+            "g": "h",
+        })
+        assert_xml_equal(
+            """
+            <instance_attributes id="iattrs">
+                <nvpair id="1" name="a" value="B"/>
+                <nvpair id="3" name="e" value="f"/>
+                <nvpair id="4" name="g" value="h"/>
+            </instance_attributes>
+            """,
+            etree.tostring(nvset_element).decode()
+        )
+    def test_empty_value_has_no_effect(self):
+        xml = """
+            <instance_attributes id="iattrs">
+                <nvpair id="1" name="a" value="b"/>
+                <nvpair id="2" name="c" value="d"/>
+                <nvpair id="3" name="e" value="f"/>
+            </instance_attributes>
+        """
+        nvset_element = etree.fromstring(xml)
+        nvpair.update_nvset(nvset_element, {})
+        assert_xml_equal(xml, etree.tostring(nvset_element).decode())
+
+class SetNvpairInNvsetTest(TestCase):
     def setUp(self):
         self.nvset = etree.Element("nvset", id="nvset")
         etree.SubElement(
@@ -27,12 +66,7 @@ class UpdateNvpairTest(TestCase):
         )
 
     def test_update(self):
-        assert_xml_equal(
-            "<nvpair id='nvset-attr' name='attr' value='10'/>",
-            etree.tostring(
-                nvpair.update_nvpair(self.nvset, self.nvset, "attr", "10")
-            ).decode()
-        )
+        nvpair.set_nvpair_in_nvset(self.nvset, "attr", "10")
         assert_xml_equal(
             """
             <nvset id="nvset">
@@ -45,12 +79,7 @@ class UpdateNvpairTest(TestCase):
         )
 
     def test_add(self):
-        assert_xml_equal(
-            "<nvpair id='nvset-test-1' name='test' value='0'/>",
-            etree.tostring(
-                nvpair.update_nvpair(self.nvset, self.nvset, "test", "0")
-            ).decode()
-        )
+        nvpair.set_nvpair_in_nvset(self.nvset, "test", "0")
         assert_xml_equal(
             """
             <nvset id="nvset">
@@ -64,12 +93,7 @@ class UpdateNvpairTest(TestCase):
         )
 
     def test_remove(self):
-        assert_xml_equal(
-            "<nvpair id='nvset-attr2' name='attr2' value='2'/>",
-            etree.tostring(
-                nvpair.update_nvpair(self.nvset, self.nvset, "attr2", "")
-            ).decode()
-        )
+        nvpair.set_nvpair_in_nvset(self.nvset, "attr2", "")
         assert_xml_equal(
             """
             <nvset id="nvset">
@@ -81,9 +105,7 @@ class UpdateNvpairTest(TestCase):
         )
 
     def test_remove_not_existing(self):
-        self.assertTrue(
-            nvpair.update_nvpair(self.nvset, self.nvset, "attr3", "") is None
-        )
+        nvpair.set_nvpair_in_nvset(self.nvset, "attr3", "")
         assert_xml_equal(
             """
             <nvset id="nvset">
@@ -96,7 +118,7 @@ class UpdateNvpairTest(TestCase):
         )
 
 
-class UpdateNvsetTest(TestCase):
+class ArrangeSomeNvsetTest(TestCase):
     def setUp(self):
         self.root = etree.Element("root", id="root")
         self.nvset = etree.SubElement(self.root, "nvset", id="nvset")
@@ -110,55 +132,47 @@ class UpdateNvsetTest(TestCase):
             self.nvset, "notnvpair", id="nvset-test", name="test", value="0"
         )
 
-    def test_None(self):
-        self.assertTrue(
-            nvpair.update_nvset("nvset", self.root, self.root, None) is None
-        )
-
-    def test_empty(self):
-        self.assertTrue(
-            nvpair.update_nvset("nvset", self.root, self.root, {}) is None
-        )
-
-    def test_existing(self):
-        self.assertEqual(
-            self.nvset,
-            nvpair.update_nvset("nvset", self.root, self.root, {
-                "attr": "10",
-                "new_one": "20",
-                "test": "0",
-                "attr2": ""
-            })
-        )
+    def test_empty_value_has_no_effect(self):
+        nvpair.arrange_first_nvset("nvset", self.root, {})
         assert_xml_equal(
             """
-            <nvset id="nvset">
-                <nvpair id="nvset-attr" name="attr" value="10"/>
-                <notnvpair id="nvset-test" name="test" value="0"/>
-                <nvpair id="nvset-new_one" name="new_one" value="20"/>
-                <nvpair id="nvset-test-1" name="test" value="0"/>
-            </nvset>
+                <nvset id="nvset">
+                    <nvpair id="nvset-attr" name="attr" value="1"/>
+                    <nvpair id="nvset-attr2" name="attr2" value="2"/>
+                    <notnvpair id="nvset-test" name="test" value="0"/>
+                </nvset>
             """,
             etree.tostring(self.nvset).decode()
         )
 
-    def test_new(self):
-        root = etree.Element("root", id="root")
+    def test_update_existing_nvset(self):
+        nvpair.arrange_first_nvset("nvset", self.root, {
+            "attr": "10",
+            "new_one": "20",
+            "test": "0",
+            "attr2": ""
+        })
         assert_xml_equal(
             """
-            <nvset id="root-nvset">
-                <nvpair id="root-nvset-attr" name="attr" value="10"/>
-                <nvpair id="root-nvset-new_one" name="new_one" value="20"/>
-                <nvpair id="root-nvset-test" name="test" value="0"/>
-            </nvset>
+                <nvset id="nvset">
+                    <nvpair id="nvset-attr" name="attr" value="10"/>
+                    <notnvpair id="nvset-test" name="test" value="0"/>
+                    <nvpair id="nvset-new_one" name="new_one" value="20"/>
+                    <nvpair id="nvset-test-1" name="test" value="0"/>
+                </nvset>
             """,
-            etree.tostring(nvpair.update_nvset("nvset", root, root, {
-                "attr": "10",
-                "new_one": "20",
-                "test": "0",
-                "attr2": ""
-            })).decode()
+            etree.tostring(self.nvset).decode()
         )
+
+    def test_create_new_nvset_if_does_not_exist(self):
+        root = etree.Element("root", id="root")
+        nvpair.arrange_first_nvset("nvset", root, {
+            "attr": "10",
+            "new_one": "20",
+            "test": "0",
+            "attr2": ""
+        })
+
         assert_xml_equal(
             """
             <root id="root">
diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
index 6285931..06a9671 100644
--- a/pcs/lib/cib/tools.py
+++ b/pcs/lib/cib/tools.py
@@ -27,9 +27,10 @@ def does_id_exist(tree, check_id):
     # do not search in /cib/status, it may contain references to previously
     # existing and deleted resources and thus preventing creating them again
     existing = root.xpath(
-        '(/cib/*[name()!="status"]|/*[name()!="cib"])//*[@id="{0}"]'.format(
-            check_id
-        )
+        (
+            '(/cib/*[name()!="status"]|/*[name()!="cib"])' +
+            '//*[name()!="acl_target" and name()!="role" and @id="{0}"]'
+        ).format(check_id)
     )
     return len(existing) > 0
 
@@ -54,6 +55,12 @@ def find_unique_id(tree, check_id):
         counter += 1
     return temp_id
 
+def create_subelement_id(context_element, suffix):
+    return find_unique_id(
+        context_element,
+        "{0}-{1}".format(context_element.get("id"), suffix)
+    )
+
 def check_new_id_applicable(tree, description, id):
     validate_id(id, description)
     validate_id_does_not_exist(tree, id)
@@ -121,7 +128,7 @@ def export_attributes(element):
 
 def get_sub_element(element, sub_element_tag, new_id=None, new_index=None):
     """
-    Returns sub-element sub_element_tag of element. It will create new
+    Returns the FIRST sub-element sub_element_tag of element. It will create new
     element if such doesn't exist yet. Id of new element will be new_if if
     it's not None. new_index specify where will be new element added, if None
     it will be appended.
@@ -235,3 +242,17 @@ def ensure_cib_version(runner, cib, version):
     raise LibraryError(reports.unable_to_upgrade_cib_to_required_version(
         current_version, version
     ))
+
+
+def etree_element_attibutes_to_dict(etree_el, required_key_list):
+    """
+    Returns all attributes of etree_el from required_key_list in dictionary,
+    where keys are attributes and values are values of attributes or None if
+    it's not present.
+
+    etree_el -- etree element from which attributes should be extracted
+    required_key_list -- list of strings, attributes names which should be
+        extracted
+    """
+    return dict([(key, etree_el.get(key)) for key in required_key_list])
+
diff --git a/pcs/lib/cluster_conf_facade.py b/pcs/lib/cluster_conf_facade.py
new file mode 100644
index 0000000..5ebc0e8
--- /dev/null
+++ b/pcs/lib/cluster_conf_facade.py
@@ -0,0 +1,59 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+from pcs.lib.node import NodeAddresses, NodeAddressesList
+
+class ClusterConfFacade(object):
+    """
+    Provides high level access to a cluster.conf file
+    """
+
+    @classmethod
+    def from_string(cls, config_string):
+        """
+        Parse cluster.conf config and create a facade around it
+
+        config_string -- cluster.conf file content as string
+        """
+        try:
+            return cls(etree.fromstring(config_string))
+        except (etree.XMLSyntaxError, etree.DocumentInvalid) as e:
+            raise LibraryError(reports.cluster_conf_invalid_format(str(e)))
+
+    def __init__(self, parsed_config):
+        """
+        Create a facade around a parsed cluster.conf config file
+        parsed_config parsed cluster.conf config
+        """
+        self._config = parsed_config
+
+    @property
+    def config(self):
+        return self._config
+
+    def get_cluster_name(self):
+        return self.config.get("name", "")
+
+    def get_nodes(self):
+        """
+        Get all defined nodes
+        """
+        result = NodeAddressesList()
+        for node in self.config.findall("./clusternodes/clusternode"):
+            altname = node.find("altname")
+            result.append(NodeAddresses(
+                ring0=node.get("name"),
+                ring1=altname.get("name") if altname is not None else None,
+                name=None,
+                id=node.get("nodeid")
+            ))
+        return result
+
diff --git a/pcs/lib/commands/acl.py b/pcs/lib/commands/acl.py
new file mode 100644
index 0000000..276f0b3
--- /dev/null
+++ b/pcs/lib/commands/acl.py
@@ -0,0 +1,333 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.lib import reports
+from pcs.lib.cib import acl
+from pcs.lib.errors import LibraryError
+
+
+REQUIRED_CIB_VERSION = (2, 0, 0)
+
+
+def create_role(lib_env, role_id, permission_info_list, description):
+    """
+    Create new acl role.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    role_id -- id of new role which should be created
+    permission_info_list -- list of permissions, items of list should be tuples:
+        (<read|write|deny>, <xpath|id>, <any string>)
+    description -- text description for role
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+
+    if permission_info_list:
+        acl.validate_permissions(cib, permission_info_list)
+    role_el = acl.create_role(cib, role_id, description)
+    if permission_info_list:
+        acl.add_permissions_to_role(role_el, permission_info_list)
+
+    lib_env.push_cib(cib)
+
+
+def remove_role(lib_env, role_id, autodelete_users_groups=False):
+    """
+    Remove role with specified id from CIB.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    role_id -- id of role which should be deleted
+    autodelete_users_groups -- if True targets and groups which are empty after
+        removal will be removed
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    try:
+        acl.remove_role(cib, role_id, autodelete_users_groups)
+    except acl.AclRoleNotFound as e:
+        raise LibraryError(acl.acl_error_to_report_item(e))
+    lib_env.push_cib(cib)
+
+
+def assign_role_not_specific(lib_env, role_id, target_or_group_id):
+    """
+    Assign role with id role_id to target or group with id target_or_group_id.
+    Target element has bigger priority so if there are target and group with same
+    id only target element will be affected by this function.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    role_id -- id of role which should be assigned to target/group
+    target_or_group_id -- id of target/group element
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    try:
+        acl.assign_role(
+            _get_target_or_group(cib, target_or_group_id),
+            acl.find_role(cib, role_id)
+        )
+    except acl.AclError as e:
+        raise LibraryError(acl.acl_error_to_report_item(e))
+    lib_env.push_cib(cib)
+
+
+def _get_target_or_group(cib, target_or_group_id):
+    """
+    Returns acl_target or acl_group element with id target_or_group_id. Target
+    element has bigger priority so if there are target and group with same id
+    only target element will be affected by this function.
+    Raises LibraryError if there is no target or group element with
+    specified id.
+
+    cib -- cib etree node
+    target_or_group_id -- id of target/group element which should be returned
+    """
+    try:
+        return acl.find_target(cib, target_or_group_id)
+    except acl.AclTargetNotFound:
+        try:
+            return acl.find_group(cib, target_or_group_id)
+        except acl.AclGroupNotFound:
+            raise LibraryError(
+                reports.id_not_found(target_or_group_id, "user/group")
+            )
+
+def assign_role_to_target(lib_env, role_id, target_id):
+    """
+    Assign role with id role_id to target with id target_id.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    role_id -- id of acl_role element which should be assigned to target
+    target_id -- id of acl_target element to which role should be assigned
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    try:
+        acl.assign_role(
+            acl.find_target(cib, target_id), acl.find_role(cib, role_id)
+        )
+    except acl.AclError as e:
+        raise LibraryError(acl.acl_error_to_report_item(e))
+    lib_env.push_cib(cib)
+
+
+def assign_role_to_group(lib_env, role_id, group_id):
+    """
+    Assign role with id role_id to group with id group_id.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    role_id -- id of acl_role element which should be assigned to group
+    group_id -- id of acl_group element to which role should be assigned
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    try:
+        acl.assign_role(
+            acl.find_group(cib, group_id), acl.find_role(cib, role_id)
+        )
+    except acl.AclError as e:
+        raise LibraryError(acl.acl_error_to_report_item(e))
+    lib_env.push_cib(cib)
+
+
+def unassign_role_not_specific(
+    lib_env, role_id, target_or_group_id, autodelete_target_group=False
+):
+    """
+    Unassign role with role_id from target/group with id target_or_group_id.
+    Target element has bigger priority so if there are target and group with same
+    id only target element will be affected by this function.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    role_id -- id of role which should be unassigned from target/group
+    target_or_group_id -- id of acl_target/acl_group element
+    autodelete_target_group -- if True remove target/group element if has no
+        more role assigned
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    acl.unassign_role(
+        _get_target_or_group(cib, target_or_group_id),
+        role_id,
+        autodelete_target_group
+    )
+    lib_env.push_cib(cib)
+
+
+def unassign_role_from_target(
+    lib_env, role_id, target_id, autodelete_target=False
+):
+    """
+    Unassign role with role_id from group with id target_id.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    role_id -- id of role which should be unassigned from target
+    target_id -- id of acl_target element
+    autodelete_target -- if True remove target element if has no more role
+        assigned
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    try:
+        acl.unassign_role(
+            acl.find_target(cib, target_id),
+            role_id,
+            autodelete_target
+        )
+    except acl.AclError as e:
+        raise LibraryError(acl.acl_error_to_report_item(e))
+    lib_env.push_cib(cib)
+
+
+def unassign_role_from_group(
+    lib_env, role_id, group_id, autodelete_group=False
+):
+    """
+    Unassign role with role_id from group with id group_id.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    role_id -- id of role which should be unassigned from group
+    group_id -- id of acl_group element
+    autodelete_group -- if True remove group element if has no more role
+        assigned
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    try:
+        acl.unassign_role(
+            acl.find_group(cib, group_id),
+            role_id,
+            autodelete_group
+        )
+    except acl.AclError as e:
+        raise LibraryError(acl.acl_error_to_report_item(e))
+    lib_env.push_cib(cib)
+
+
+def _assign_roles_to_element(cib, element, role_id_list):
+    """
+    Assign roles from role_id_list to element.
+    Raises LibraryError on any failure.
+
+    cib -- cib etree node
+    element -- element to which specified roles should be assigned
+    role_id_list -- list of role id
+    """
+    report_list = []
+    for role_id in role_id_list:
+        try:
+            acl.assign_role(element, acl.find_role(cib, role_id))
+        except acl.AclError as e:
+            report_list.append(acl.acl_error_to_report_item(e))
+    if report_list:
+        raise LibraryError(*report_list)
+
+
+def create_target(lib_env, target_id, role_list):
+    """
+    Create new target with id target_id and assign roles role_list to it.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    target_id -- id of new target
+    role_list -- list of roles to assign to new target
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    _assign_roles_to_element(cib, acl.create_target(cib, target_id), role_list)
+    lib_env.push_cib(cib)
+
+
+def create_group(lib_env, group_id, role_list):
+    """
+    Create new group with id group_id and assign roles role_list to it.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    group_id -- id of new group
+    role_list -- list of roles to assign to new group
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    _assign_roles_to_element(cib, acl.create_group(cib, group_id), role_list)
+    lib_env.push_cib(cib)
+
+
+def remove_target(lib_env, target_id):
+    """
+    Remove acl_target element with id target_id.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    target_id -- id of target which should be removed
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    acl.remove_target(cib, target_id)
+    lib_env.push_cib(cib)
+
+
+def remove_group(lib_env, group_id):
+    """
+    Remove acl_group element with id group_id.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    group_id -- id of group which should be removed
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    acl.remove_group(cib, group_id)
+    lib_env.push_cib(cib)
+
+
+def add_permission(lib_env, role_id, permission_info_list):
+    """
+    Add permissions to role with id role_id. If role doesn't exist it will be
+    created.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    role_id -- id of role
+    permission_info_list -- list of permissions, items of list should be tuples:
+        (<read|write|deny>, <xpath|id>, <any string>)
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    acl.validate_permissions(cib, permission_info_list)
+    acl.add_permissions_to_role(
+        acl.provide_role(cib, role_id), permission_info_list
+    )
+    lib_env.push_cib(cib)
+
+
+def remove_permission(lib_env, permission_id):
+    """
+    Remove permission with id permission_id.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    permission_id -- id of permission element which should be removed
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    acl.remove_permission(cib, permission_id)
+    lib_env.push_cib(cib)
+
+
+def get_config(lib_env):
+    """
+    Returns ACL configuration in a dictionary. Format of output:
+        {
+            "target_list": <list of targets>,
+            "group_list": <list of groups>,
+            "role_list": <list of roles>,
+        }
+
+    lib_env -- LibraryEnvironment
+    """
+    cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
+    return {
+        "target_list": acl.get_target_list(cib),
+        "group_list": acl.get_group_list(cib),
+        "role_list": acl.get_role_list(cib),
+    }
+
diff --git a/pcs/lib/commands/alert.py b/pcs/lib/commands/alert.py
index 432d9d5..1ae5405 100644
--- a/pcs/lib/commands/alert.py
+++ b/pcs/lib/commands/alert.py
@@ -38,8 +38,8 @@ def create_alert(
     cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
 
     alert_el = alert.create_alert(cib, alert_id, path, description)
-    alert.update_instance_attributes(cib, alert_el, instance_attribute_dict)
-    alert.update_meta_attributes(cib, alert_el, meta_attribute_dict)
+    alert.update_instance_attributes(alert_el, instance_attribute_dict)
+    alert.update_meta_attributes(alert_el, meta_attribute_dict)
 
     lib_env.push_cib(cib)
 
@@ -66,21 +66,27 @@ def update_alert(
     cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
 
     alert_el = alert.update_alert(cib, alert_id, path, description)
-    alert.update_instance_attributes(cib, alert_el, instance_attribute_dict)
-    alert.update_meta_attributes(cib, alert_el, meta_attribute_dict)
+    alert.update_instance_attributes(alert_el, instance_attribute_dict)
+    alert.update_meta_attributes(alert_el, meta_attribute_dict)
 
     lib_env.push_cib(cib)
 
 
-def remove_alert(lib_env, alert_id):
+def remove_alert(lib_env, alert_id_list):
     """
-    Remove alert with specified id.
+    Remove alerts with specified ids.
 
     lib_env -- LibraryEnvironment
-    alert_id -- id of alert which should be removed
+    alert_id_list -- list of alerts ids which should be removed
     """
     cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-    alert.remove_alert(cib, alert_id)
+    report_list = []
+    for alert_id in alert_id_list:
+        try:
+            alert.remove_alert(cib, alert_id)
+        except LibraryError as e:
+            report_list += e.args
+    lib_env.report_processor.process_list(report_list)
     lib_env.push_cib(cib)
 
 
@@ -121,8 +127,8 @@ def add_recipient(
         description=description,
         allow_same_value=allow_same_value
     )
-    alert.update_instance_attributes(cib, recipient, instance_attribute_dict)
-    alert.update_meta_attributes(cib, recipient, meta_attribute_dict)
+    alert.update_instance_attributes(recipient, instance_attribute_dict)
+    alert.update_meta_attributes(recipient, meta_attribute_dict)
 
     lib_env.push_cib(cib)
 
@@ -162,21 +168,27 @@ def update_recipient(
         description=description,
         allow_same_value=allow_same_value
     )
-    alert.update_instance_attributes(cib, recipient, instance_attribute_dict)
-    alert.update_meta_attributes(cib, recipient, meta_attribute_dict)
+    alert.update_instance_attributes(recipient, instance_attribute_dict)
+    alert.update_meta_attributes(recipient, meta_attribute_dict)
 
     lib_env.push_cib(cib)
 
 
-def remove_recipient(lib_env, recipient_id):
+def remove_recipient(lib_env, recipient_id_list):
     """
-    Remove existing recipient.
+    Remove specified recipients.
 
     lib_env -- LibraryEnvironment
-    recipient_id -- if of recipient to be removed
+    recipient_id_list -- list of recipients ids to be removed
     """
     cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-    alert.remove_recipient(cib, recipient_id)
+    report_list = []
+    for recipient_id in recipient_id_list:
+        try:
+            alert.remove_recipient(cib, recipient_id)
+        except LibraryError as e:
+            report_list += e.args
+    lib_env.report_processor.process_list(report_list)
     lib_env.push_cib(cib)
 
 
diff --git a/pcs/lib/commands/resource_agent.py b/pcs/lib/commands/resource_agent.py
new file mode 100644
index 0000000..933da49
--- /dev/null
+++ b/pcs/lib/commands/resource_agent.py
@@ -0,0 +1,115 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.lib import resource_agent
+
+
+def list_standards(lib_env):
+    """
+    List resource agents standards (ocf, lsb, ... ) on the local host
+    """
+    return resource_agent.list_resource_agents_standards(lib_env.cmd_runner())
+
+
+def list_ocf_providers(lib_env):
+    """
+    List resource agents ocf providers on the local host
+    """
+    return resource_agent.list_resource_agents_ocf_providers(
+        lib_env.cmd_runner()
+    )
+
+
+def list_agents_for_standard_and_provider(lib_env, standard_provider=None):
+    """
+    List resource agents for specified standard on the local host
+    string standard_provider standard[:provider], e.g. None, ocf, ocf:pacemaker
+    """
+    if standard_provider:
+        standards = [standard_provider]
+    else:
+        standards = resource_agent.list_resource_agents_standards(
+            lib_env.cmd_runner()
+        )
+    agents = []
+    for std in standards:
+        agents += resource_agent.list_resource_agents(lib_env.cmd_runner(), std)
+    return sorted(
+        agents,
+        # works with both str and unicode in both python 2 and 3
+        key=lambda x: x.lower()
+    )
+
+
+def list_agents(lib_env, describe=True, search=None):
+    """
+    List all resource agents on the local host, optionally filtered and
+        described
+    bool describe load and return agents' description as well
+    string search return only agents which name contains this string
+    """
+    runner = lib_env.cmd_runner()
+
+    # list agents for all standards and providers
+    agent_names = []
+    for std in resource_agent.list_resource_agents_standards_and_providers(
+        runner
+    ):
+        agent_names += [
+            "{0}:{1}".format(std, agent)
+            for agent in resource_agent.list_resource_agents(runner, std)
+        ]
+    agent_names.sort(
+        # works with both str and unicode in both python 2 and 3
+        key=lambda x: x.lower()
+    )
+    return _complete_agent_list(
+        runner,
+        agent_names,
+        describe,
+        search,
+        resource_agent.ResourceAgent
+    )
+
+
+def _complete_agent_list(
+    runner, agent_names, describe, search, metadata_class
+):
+    # filter agents by name if requested
+    if search:
+        search_lower = search.lower()
+        agent_names = [
+            name for name in agent_names if search_lower in name.lower()
+        ]
+
+    # complete the output and load descriptions if requested
+    agent_list = []
+    for name in agent_names:
+        try:
+            agent_metadata = metadata_class(runner, name)
+            if describe:
+                agent_list.append(agent_metadata.get_description_info())
+            else:
+                agent_list.append(agent_metadata.get_name_info())
+        except resource_agent.UnableToGetAgentMetadata:
+            # if we cannot get valid metadata, it's not a resource agent and
+            # we don't return it in the list
+            pass
+    return agent_list
+
+
+def describe_agent(lib_env, agent_name):
+    """
+    Get agent's description (metadata) in a structure
+    string agent_name name of the agent
+    """
+    agent = resource_agent.find_valid_resource_agent_by_name(
+        lib_env.report_processor,
+        lib_env.cmd_runner(),
+        agent_name,
+    )
+    return agent.get_full_info()
diff --git a/pcs/lib/commands/sbd.py b/pcs/lib/commands/sbd.py
index 2acb104..8cc9eda 100644
--- a/pcs/lib/commands/sbd.py
+++ b/pcs/lib/commands/sbd.py
@@ -125,8 +125,6 @@ def enable_sbd(
     allow_unknown_opts -- if True, accept also unknown options.
     ignore_offline_nodes -- if True, omit offline nodes
     """
-    __ensure_not_cman(lib_env)
-
     node_list = _get_cluster_nodes(lib_env)
 
     if not default_watchdog:
@@ -158,13 +156,14 @@ def enable_sbd(
     )
 
     # enable ATB if needed
-    corosync_conf = lib_env.get_corosync_conf()
-    if sbd.atb_has_to_be_enabled_pre_enable_check(corosync_conf):
-        lib_env.report_processor.process(reports.sbd_requires_atb())
-        corosync_conf.set_quorum_options(
-            lib_env.report_processor, {"auto_tie_breaker": "1"}
-        )
-        lib_env.push_corosync_conf(corosync_conf, ignore_offline_nodes)
+    if not lib_env.is_cman_cluster:
+        corosync_conf = lib_env.get_corosync_conf()
+        if sbd.atb_has_to_be_enabled_pre_enable_check(corosync_conf):
+            lib_env.report_processor.process(reports.sbd_requires_atb())
+            corosync_conf.set_quorum_options(
+                lib_env.report_processor, {"auto_tie_breaker": "1"}
+            )
+            lib_env.push_corosync_conf(corosync_conf, ignore_offline_nodes)
 
     # distribute SBD configuration
     config = sbd.get_default_sbd_config()
@@ -199,12 +198,18 @@ def disable_sbd(lib_env, ignore_offline_nodes=False):
     lib_env -- LibraryEnvironment
     ignore_offline_nodes -- if True, omit offline nodes
     """
-    __ensure_not_cman(lib_env)
-
     node_list = _get_online_nodes(
         lib_env, _get_cluster_nodes(lib_env), ignore_offline_nodes
     )
 
+    if lib_env.is_cman_cluster:
+        nodes_task.check_corosync_offline_on_nodes(
+            lib_env.node_communicator(),
+            lib_env.report_processor,
+            node_list,
+            ignore_offline_nodes
+        )
+
     sbd.set_stonith_watchdog_timeout_to_zero_on_all_nodes(
         lib_env.node_communicator(), node_list
     )
@@ -214,9 +219,10 @@ def disable_sbd(lib_env, ignore_offline_nodes=False):
         node_list
     )
 
-    lib_env.report_processor.process(
-        reports.cluster_restart_required_to_apply_changes()
-    )
+    if not lib_env.is_cman_cluster:
+        lib_env.report_processor.process(
+            reports.cluster_restart_required_to_apply_changes()
+        )
 
 
 def _get_online_nodes(lib_env, node_list, ignore_offline_nodes=False):
@@ -266,8 +272,6 @@ def get_cluster_sbd_status(lib_env):
 
     lib_env -- LibraryEnvironment
     """
-    __ensure_not_cman(lib_env)
-
     node_list = _get_cluster_nodes(lib_env)
     report_item_list = []
     successful_node_list = []
@@ -283,9 +287,14 @@ def get_cluster_sbd_status(lib_env):
             })
             successful_node_list.append(node)
         except NodeCommunicationException as e:
+            report_item_list.append(node_communicator_exception_to_report_item(
+                e,
+                severity=Severities.WARNING
+            ))
             report_item_list.append(reports.unable_to_get_sbd_status(
                 node.label,
-                node_communicator_exception_to_report_item(e).message
+                "", #reason is in the previous report item
+                #the warning severity is implied by that item
             ))
         except (ValueError, KeyError) as e:
             report_item_list.append(reports.unable_to_get_sbd_status(
@@ -324,8 +333,6 @@ def get_cluster_sbd_config(lib_env):
 
     lib_env -- LibraryEnvironment
     """
-    __ensure_not_cman(lib_env)
-
     node_list = _get_cluster_nodes(lib_env)
     config_list = []
     successful_node_list = []
@@ -347,9 +354,13 @@ def get_cluster_sbd_config(lib_env):
                 Severities.WARNING
             ))
         except NodeCommunicationException as e:
+            report_item_list.append(node_communicator_exception_to_report_item(
+                e,
+                severity=Severities.WARNING
+            ))
             report_item_list.append(reports.unable_to_get_sbd_config(
                 node.label,
-                node_communicator_exception_to_report_item(e).message,
+                "", #reason is in previous report item
                 Severities.WARNING
             ))
 
@@ -369,14 +380,12 @@ def get_cluster_sbd_config(lib_env):
 
 
 def get_local_sbd_config(lib_env):
-    __ensure_not_cman(lib_env)
     return environment_file_to_dict(sbd.get_local_sbd_config())
 
 
 def _get_cluster_nodes(lib_env):
-    return lib_env.get_corosync_conf().get_nodes()
-
-
-def __ensure_not_cman(lib_env):
     if lib_env.is_cman_cluster:
-        raise LibraryError(reports.cman_unsupported_command())
+        return lib_env.get_cluster_conf().get_nodes()
+    else:
+        return lib_env.get_corosync_conf().get_nodes()
+
diff --git a/pcs/lib/commands/stonith_agent.py b/pcs/lib/commands/stonith_agent.py
new file mode 100644
index 0000000..6257f18
--- /dev/null
+++ b/pcs/lib/commands/stonith_agent.py
@@ -0,0 +1,45 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.lib import resource_agent
+from pcs.lib.commands.resource_agent import _complete_agent_list
+from pcs.lib.errors import LibraryError
+
+
+def list_agents(lib_env, describe=True, search=None):
+    """
+    List all stonith agents on the local host, optionally filtered and described
+    bool describe load and return agents' description as well
+    string search return only agents which name contains this string
+    """
+    runner = lib_env.cmd_runner()
+    agent_names = resource_agent.list_stonith_agents(runner)
+    return _complete_agent_list(
+        runner,
+        agent_names,
+        describe,
+        search,
+        resource_agent.StonithAgent
+    )
+
+
+def describe_agent(lib_env, agent_name):
+    """
+    Get agent's description (metadata) in a structure
+    string agent_name name of the agent (not containing "stonith:" prefix)
+    """
+    try:
+        metadata = resource_agent.StonithAgent(
+            lib_env.cmd_runner(),
+            agent_name
+        )
+        return metadata.get_full_info()
+    except resource_agent.ResourceAgentError as e:
+        raise LibraryError(
+            resource_agent.resource_agent_error_to_report_item(e)
+        )
+
diff --git a/pcs/lib/commands/test/test_acl.py b/pcs/lib/commands/test/test_acl.py
new file mode 100644
index 0000000..e756c6e
--- /dev/null
+++ b/pcs/lib/commands/test/test_acl.py
@@ -0,0 +1,510 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    ExtendedAssertionsMixin,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_unittest import mock, TestCase
+
+from pcs.common import report_codes
+from pcs.lib.errors import (
+    LibraryError,
+    ReportItemSeverity as Severities,
+)
+from pcs.lib.env import LibraryEnvironment
+
+import pcs.lib.commands.acl as cmd_acl
+import pcs.lib.cib.acl as acl_lib
+
+
+REQUIRED_CIB_VERSION = (2, 0, 0)
+
+
+class AclCommandsTest(TestCase, ExtendedAssertionsMixin):
+    def setUp(self):
+        self.mock_rep = MockLibraryReportProcessor()
+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
+        self.mock_env.report_processor = self.mock_rep
+        self.cib = "cib"
+        self.mock_env.get_cib.return_value = self.cib
+
+    def assert_get_cib_called(self):
+        self.mock_env.get_cib.assert_called_once_with(REQUIRED_CIB_VERSION)
+
+    def assert_same_cib_pushed(self):
+        self.mock_env.push_cib.assert_called_once_with(self.cib)
+
+    def assert_cib_not_pushed(self):
+        self.assertEqual(0, self.mock_env.push_cib.call_count)
+
+
+
+ at mock.patch("pcs.lib.cib.acl.validate_permissions")
+ at mock.patch("pcs.lib.cib.acl.create_role")
+ at mock.patch("pcs.lib.cib.acl.add_permissions_to_role")
+class CreateRoleTest(AclCommandsTest):
+    def test_success(self, mock_add_perm, mock_create_role, mock_validate):
+        perm_list = ["my", "list"]
+        mock_create_role.return_value = "role el"
+        cmd_acl.create_role(self.mock_env, "role_id", perm_list, "desc")
+        self.assert_get_cib_called()
+        mock_validate.assert_called_once_with(self.cib, perm_list)
+        mock_create_role.assert_called_once_with(self.cib, "role_id", "desc")
+        mock_add_perm.assert_called_once_with("role el", perm_list)
+        self.assert_same_cib_pushed()
+
+    def test_no_permission(
+        self, mock_add_perm, mock_create_role, mock_validate
+    ):
+        mock_create_role.return_value = "role el"
+        cmd_acl.create_role(self.mock_env, "role_id", [], "desc")
+        self.assert_get_cib_called()
+        self.assertEqual(0, mock_validate.call_count)
+        mock_create_role.assert_called_once_with(self.cib, "role_id", "desc")
+        self.assertEqual(0, mock_add_perm.call_count)
+        self.assert_same_cib_pushed()
+
+
+ at mock.patch("pcs.lib.cib.acl.remove_role")
+class RemoveRoleTest(AclCommandsTest):
+    def test_success_no_autodelete(self, mock_remove):
+        cmd_acl.remove_role(self.mock_env, "role_id", False)
+        self.assert_get_cib_called()
+        mock_remove.assert_called_once_with(self.cib, "role_id", False)
+        self.assert_same_cib_pushed()
+
+    def test_success_autodelete(self, mock_remove):
+        cmd_acl.remove_role(self.mock_env, "role_id", True)
+        self.assert_get_cib_called()
+        mock_remove.assert_called_once_with(self.cib, "role_id", True)
+        self.assert_same_cib_pushed()
+
+    def test_role_not_found(self, mock_remove):
+        mock_remove.side_effect = acl_lib.AclRoleNotFound("role_id")
+        assert_raise_library_error(
+            lambda: cmd_acl.remove_role(self.mock_env, "role_id", True),
+            (
+                Severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "role_id",
+                    "id_description": "role",
+                }
+            )
+        )
+        self.assert_get_cib_called()
+        mock_remove.assert_called_once_with(self.cib, "role_id", True)
+        self.assert_cib_not_pushed()
+
+
+ at mock.patch("pcs.lib.commands.acl._get_target_or_group")
+ at mock.patch("pcs.lib.cib.acl.assign_role")
+ at mock.patch("pcs.lib.cib.acl.find_role")
+ at mock.patch("pcs.lib.cib.acl.acl_error_to_report_item")
+class AssignRoleNotSpecific(AclCommandsTest, ExtendedAssertionsMixin):
+    def test_success(
+        self, mock_error_convert, mock_find_role, mock_assign, mock_get_tg
+    ):
+        mock_get_tg.return_value = "target_el"
+        mock_find_role.return_value = "role_el"
+        cmd_acl.assign_role_not_specific(self.mock_env, "role_id", "target_id")
+        self.assert_get_cib_called()
+        mock_get_tg.assert_called_once_with(self.cib, "target_id")
+        mock_find_role.assert_called_once_with(self.cib, "role_id")
+        mock_assign.assert_called_once_with("target_el", "role_el")
+        self.assertEqual(0, mock_error_convert.call_count)
+        self.assert_same_cib_pushed()
+
+    def test_failure(
+        self, mock_error_convert, mock_find_role, mock_assign, mock_get_tg
+    ):
+        mock_get_tg.return_value = "target_el"
+        exception_obj = acl_lib.AclRoleNotFound("role_id")
+        mock_find_role.side_effect = exception_obj
+        self.assert_raises(
+            LibraryError,
+            lambda: cmd_acl.assign_role_not_specific(
+                self.mock_env, "role_id", "target_id"
+            )
+        )
+        self.assert_get_cib_called()
+        self.assertEqual(0, mock_assign.call_count)
+        mock_error_convert.assert_called_once_with(exception_obj)
+        self.assert_cib_not_pushed()
+
+
+ at mock.patch("pcs.lib.cib.acl.find_target")
+ at mock.patch("pcs.lib.cib.acl.find_group")
+class GetTargetOrGroupTest(AclCommandsTest):
+    def test_target(self, mock_find_group, mock_find_target):
+        mock_find_target.return_value = "target_el"
+        self.assertEqual(
+            "target_el", cmd_acl._get_target_or_group(self.cib, "target_id")
+        )
+        mock_find_target.assert_called_once_with(self.cib, "target_id")
+        self.assertEqual(0, mock_find_group.call_count)
+
+    def test_group(self, mock_find_group, mock_find_target):
+        mock_find_target.side_effect = acl_lib.AclTargetNotFound("group_id")
+        mock_find_group.return_value = "group_el"
+        self.assertEqual(
+            "group_el", cmd_acl._get_target_or_group(self.cib, "group_id")
+        )
+        mock_find_target.assert_called_once_with(self.cib, "group_id")
+        mock_find_group.assert_called_once_with(self.cib, "group_id")
+
+    def test_not_found(self, mock_find_group, mock_find_target):
+        mock_find_target.side_effect = acl_lib.AclTargetNotFound("id")
+        mock_find_group.side_effect = acl_lib.AclGroupNotFound("id")
+        assert_raise_library_error(
+            lambda: cmd_acl._get_target_or_group(self.cib, "id"),
+            (
+                Severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "id",
+                    "id_description": "user/group",
+                }
+            )
+        )
+        mock_find_target.assert_called_once_with(self.cib, "id")
+        mock_find_group.assert_called_once_with(self.cib, "id")
+
+
+ at mock.patch("pcs.lib.cib.acl.assign_role")
+ at mock.patch("pcs.lib.cib.acl.find_role")
+ at mock.patch("pcs.lib.cib.acl.find_target")
+ at mock.patch("pcs.lib.cib.acl.acl_error_to_report_item")
+class AssignRoleToTargetTest(AclCommandsTest):
+    def test_success(
+        self, mock_error_convert, mock_target, mock_role, mock_assign
+    ):
+        mock_target.return_value = "target_el"
+        mock_role.return_value = "role_el"
+        cmd_acl.assign_role_to_target(self.mock_env, "role_id", "target_id")
+        self.assert_get_cib_called()
+        mock_target.assert_called_once_with(self.cib, "target_id")
+        mock_role.assert_called_once_with(self.cib, "role_id")
+        mock_assign.assert_called_once_with("target_el", "role_el")
+        self.assert_same_cib_pushed()
+        self.assertEqual(0, mock_error_convert.call_count)
+
+    def test_failure(
+        self, mock_error_convert, mock_target, mock_role, mock_assign
+    ):
+        exception_obj = acl_lib.AclTargetNotFound("target_id")
+        mock_target.side_effect = exception_obj
+        mock_role.return_value = "role_el"
+        self.assert_raises(
+            LibraryError,
+            lambda: cmd_acl.assign_role_to_target(
+                self.mock_env, "role_id", "target_id"
+            )
+        )
+        self.assert_get_cib_called()
+        mock_target.assert_called_once_with(self.cib, "target_id")
+        mock_error_convert.assert_called_once_with(exception_obj)
+        self.assertEqual(0, mock_assign.call_count)
+        self.assert_cib_not_pushed()
+
+
+@mock.patch("pcs.lib.cib.acl.assign_role")
+@mock.patch("pcs.lib.cib.acl.find_role")
+@mock.patch("pcs.lib.cib.acl.find_group")
+@mock.patch("pcs.lib.cib.acl.acl_error_to_report_item")
+class AssignRoleToGroupTest(AclCommandsTest):
+    def test_success(
+        self, mock_error_convert, mock_group, mock_role, mock_assign
+    ):
+        mock_group.return_value = "group_el"
+        mock_role.return_value = "role_el"
+        cmd_acl.assign_role_to_group(self.mock_env, "role_id", "group_id")
+        self.assert_get_cib_called()
+        mock_group.assert_called_once_with(self.cib, "group_id")
+        mock_role.assert_called_once_with(self.cib, "role_id")
+        mock_assign.assert_called_once_with("group_el", "role_el")
+        self.assert_same_cib_pushed()
+        self.assertEqual(0, mock_error_convert.call_count)
+
+    def test_failure(
+        self, mock_error_convert, mock_group, mock_role, mock_assign
+    ):
+        exception_obj = acl_lib.AclGroupNotFound("group_id")
+        mock_group.side_effect = exception_obj
+        mock_role.return_value = "role_el"
+        self.assert_raises(
+            LibraryError,
+            lambda: cmd_acl.assign_role_to_group(
+                self.mock_env, "role_id", "group_id"
+            )
+        )
+        self.assert_get_cib_called()
+        mock_group.assert_called_once_with(self.cib, "group_id")
+        mock_error_convert.assert_called_once_with(exception_obj)
+        self.assertEqual(0, mock_assign.call_count)
+        self.assert_cib_not_pushed()
+
+
+@mock.patch("pcs.lib.commands.acl._get_target_or_group")
+@mock.patch("pcs.lib.cib.acl.unassign_role")
+class UnassignRoleNotSpecificTest(AclCommandsTest):
+    def test_success(self, mock_unassign, mock_tg):
+        mock_tg.return_value = "target_el"
+        cmd_acl.unassign_role_not_specific(
+            self.mock_env, "role_id", "target_id", False
+        )
+        self.assert_get_cib_called()
+        mock_tg.assert_called_once_with(self.cib, "target_id")
+        mock_unassign.assert_called_once_with("target_el", "role_id", False)
+        self.assert_same_cib_pushed()
+
+    def test_success_with_autodelete(self, mock_unassign, mock_tg):
+        mock_tg.return_value = "target_el"
+        cmd_acl.unassign_role_not_specific(
+            self.mock_env, "role_id", "target_id", True
+        )
+        self.assert_get_cib_called()
+        mock_tg.assert_called_once_with(self.cib, "target_id")
+        mock_unassign.assert_called_once_with("target_el", "role_id", True)
+        self.assert_same_cib_pushed()
+
+
+@mock.patch("pcs.lib.cib.acl.unassign_role")
+@mock.patch("pcs.lib.cib.acl.find_target")
+@mock.patch("pcs.lib.cib.acl.acl_error_to_report_item")
+class UnassignRoleFromTargetTest(AclCommandsTest):
+    def test_success(self, mock_error_convert, mock_find_el, mock_unassign):
+        mock_find_el.return_value = "el"
+        cmd_acl.unassign_role_from_target(
+            self.mock_env, "role_id", "el_id", False
+        )
+        self.assert_get_cib_called()
+        mock_find_el.assert_called_once_with(self.cib, "el_id")
+        mock_unassign.assert_called_once_with("el", "role_id", False)
+        self.assert_same_cib_pushed()
+        self.assertEqual(0, mock_error_convert.call_count)
+
+    def test_success_autodelete(
+        self, mock_error_convert, mock_find_el, mock_unassign
+    ):
+        mock_find_el.return_value = "el"
+        cmd_acl.unassign_role_from_target(
+            self.mock_env, "role_id", "el_id", True
+        )
+        self.assert_get_cib_called()
+        mock_find_el.assert_called_once_with(self.cib, "el_id")
+        mock_unassign.assert_called_once_with("el", "role_id", True)
+        self.assert_same_cib_pushed()
+        self.assertEqual(0, mock_error_convert.call_count)
+
+    def test_failure(self, mock_error_convert, mock_find_el, mock_unassign):
+        exception_obj = acl_lib.AclTargetNotFound("el_id")
+        mock_find_el.side_effect = exception_obj
+        self.assert_raises(
+            LibraryError,
+            lambda: cmd_acl.unassign_role_from_target(
+                self.mock_env, "role_id", "el_id", False
+            )
+        )
+        self.assert_get_cib_called()
+        mock_find_el.assert_called_once_with(self.cib, "el_id")
+        self.assertEqual(0, mock_unassign.call_count)
+        self.assert_cib_not_pushed()
+        mock_error_convert.assert_called_once_with(exception_obj)
+
+
+@mock.patch("pcs.lib.cib.acl.unassign_role")
+@mock.patch("pcs.lib.cib.acl.find_group")
+@mock.patch("pcs.lib.cib.acl.acl_error_to_report_item")
+class UnassignRoleFromGroupTest(AclCommandsTest):
+    def test_success(self, mock_error_convert, mock_find_el, mock_unassign):
+        mock_find_el.return_value = "el"
+        cmd_acl.unassign_role_from_group(
+            self.mock_env, "role_id", "el_id", False
+        )
+        self.assert_get_cib_called()
+        mock_find_el.assert_called_once_with(self.cib, "el_id")
+        mock_unassign.assert_called_once_with("el", "role_id", False)
+        self.assert_same_cib_pushed()
+        self.assertEqual(0, mock_error_convert.call_count)
+
+    def test_success_autodelete(
+        self, mock_error_convert, mock_find_el, mock_unassign
+    ):
+        mock_find_el.return_value = "el"
+        cmd_acl.unassign_role_from_group(
+            self.mock_env, "role_id", "el_id", True
+        )
+        self.assert_get_cib_called()
+        mock_find_el.assert_called_once_with(self.cib, "el_id")
+        mock_unassign.assert_called_once_with("el", "role_id", True)
+        self.assert_same_cib_pushed()
+        self.assertEqual(0, mock_error_convert.call_count)
+
+    def test_failure(self, mock_error_convert, mock_find_el, mock_unassign):
+        exception_obj = acl_lib.AclGroupNotFound("el_id")
+        mock_find_el.side_effect = exception_obj
+        self.assert_raises(
+            LibraryError,
+            lambda: cmd_acl.unassign_role_from_group(
+                self.mock_env, "role_id", "el_id", False
+            )
+        )
+        self.assert_get_cib_called()
+        mock_find_el.assert_called_once_with(self.cib, "el_id")
+        self.assertEqual(0, mock_unassign.call_count)
+        self.assert_cib_not_pushed()
+        mock_error_convert.assert_called_once_with(exception_obj)
+
+
+@mock.patch("pcs.lib.cib.acl.assign_role")
+@mock.patch("pcs.lib.cib.acl.find_role")
+class AssignRolesToElement(AclCommandsTest):
+    def test_success(self, mock_role, mock_assign):
+        mock_role.side_effect = lambda _, el_id: "{0}_el".format(el_id)
+        cmd_acl._assign_roles_to_element(
+            self.cib, "el", ["role1", "role2", "role3"]
+        )
+        mock_role.assert_has_calls([
+            mock.call(self.cib, "role1"),
+            mock.call(self.cib, "role2"),
+            mock.call(self.cib, "role3")
+        ])
+        mock_assign.assert_has_calls([
+            mock.call("el", "role1_el"),
+            mock.call("el", "role2_el"),
+            mock.call("el", "role3_el")
+        ])
+
+    def test_failure(self, mock_role, mock_assign):
+        def _mock_role(_, el_id):
+            if el_id in ["role1", "role3"]:
+                raise acl_lib.AclRoleNotFound(el_id)
+            elif el_id == "role2":
+                return "role2_el"
+            else:
+                raise AssertionError("unexpected input")
+
+        mock_role.side_effect = _mock_role
+        assert_raise_library_error(
+            lambda: cmd_acl._assign_roles_to_element(
+                self.cib, "el", ["role1", "role2", "role3"]
+            ),
+            (
+                Severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "role1",
+                    "id_description": "role",
+                }
+            ),
+            (
+                Severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "role3",
+                    "id_description": "role",
+                }
+            )
+        )
+        mock_role.assert_has_calls([
+            mock.call(self.cib, "role1"),
+            mock.call(self.cib, "role2"),
+            mock.call(self.cib, "role3")
+        ])
+        mock_assign.assert_called_once_with("el", "role2_el")
+
+
+@mock.patch("pcs.lib.cib.acl.create_target")
+@mock.patch("pcs.lib.commands.acl._assign_roles_to_element")
+class CreateTargetTest(AclCommandsTest):
+    def test_success(self, mock_assign, mock_create):
+        mock_create.return_value = "el"
+        cmd_acl.create_target(
+            self.mock_env, "el_id", ["role1", "role2", "role3"]
+        )
+        self.assert_get_cib_called()
+        mock_create.assert_called_once_with(self.cib, "el_id")
+        mock_assign.assert_called_once_with(self.cib, "el", ["role1", "role2", "role3"])
+        self.assert_same_cib_pushed()
+
+
+@mock.patch("pcs.lib.cib.acl.create_group")
+@mock.patch("pcs.lib.commands.acl._assign_roles_to_element")
+class CreateGroupTest(AclCommandsTest):
+    def test_success(self, mock_assign, mock_create):
+        mock_create.return_value = "el"
+        cmd_acl.create_group(
+            self.mock_env, "el_id", ["role1", "role2", "role3"]
+        )
+        self.assert_get_cib_called()
+        mock_create.assert_called_once_with(self.cib, "el_id")
+        mock_assign.assert_called_once_with(self.cib, "el", ["role1", "role2", "role3"])
+        self.assert_same_cib_pushed()
+
+
+@mock.patch("pcs.lib.cib.acl.remove_target")
+class RemoveTargetTest(AclCommandsTest):
+    def test_success(self, mock_remove):
+        cmd_acl.remove_target(self.mock_env, "el_id")
+        self.assert_get_cib_called()
+        mock_remove.assert_called_once_with(self.cib, "el_id")
+        self.assert_same_cib_pushed()
+
+
+@mock.patch("pcs.lib.cib.acl.remove_group")
+class RemoveGroupTest(AclCommandsTest):
+    def test_success(self, mock_remove):
+        cmd_acl.remove_group(self.mock_env, "el_id")
+        self.assert_get_cib_called()
+        mock_remove.assert_called_once_with(self.cib, "el_id")
+        self.assert_same_cib_pushed()
+
+
+@mock.patch("pcs.lib.cib.acl.validate_permissions")
+@mock.patch("pcs.lib.cib.acl.provide_role")
+@mock.patch("pcs.lib.cib.acl.add_permissions_to_role")
+class AddPermissionTest(AclCommandsTest):
+    def test_success(self, mock_add_perm, mock_provide_role, mock_validate):
+        mock_provide_role.return_value = "role_el"
+        cmd_acl.add_permission(self.mock_env, "role_id", "permission_list")
+        self.assert_get_cib_called()
+        mock_validate.assert_called_once_with(self.cib, "permission_list")
+        mock_provide_role.assert_called_once_with(self.cib, "role_id")
+        mock_add_perm.assert_called_once_with("role_el", "permission_list")
+        self.assert_same_cib_pushed()
+
+
+@mock.patch("pcs.lib.cib.acl.remove_permission")
+class RemovePermission(AclCommandsTest):
+    def test_success(self, mock_remove):
+        cmd_acl.remove_permission(self.mock_env, "id")
+        self.assert_get_cib_called()
+        mock_remove.assert_called_once_with(self.cib, "id")
+        self.assert_same_cib_pushed()
+
+
+@mock.patch("pcs.lib.cib.acl.get_target_list")
+@mock.patch("pcs.lib.cib.acl.get_group_list")
+@mock.patch("pcs.lib.cib.acl.get_role_list")
+class GetConfigTest(AclCommandsTest):
+    def test_success(self, mock_role, mock_group, mock_target):
+        mock_role.return_value = "role"
+        mock_group.return_value = "group"
+        mock_target.return_value = "target"
+        self.assertEqual(
+            {
+                "target_list": "target",
+                "group_list": "group",
+                "role_list": "role",
+            },
+            cmd_acl.get_config(self.mock_env)
+        )
+
diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py
index bc68baf..440c230 100644
--- a/pcs/lib/commands/test/test_alert.py
+++ b/pcs/lib/commands/test/test_alert.py
@@ -14,6 +14,7 @@ from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.assertions import (
     assert_raise_library_error,
     assert_xml_equal,
+    assert_report_item_list_equal,
 )
 from pcs.test.tools.custom_mock import MockLibraryReportProcessor
 
@@ -278,8 +279,10 @@ class RemoveAlertTest(TestCase):
             <cib validate-with="pacemaker-2.5">
                 <configuration>
                     <alerts>
-                        <alert id="alert" path="path"/>
-                        <alert id="alert-1" path="/path"/>
+                        <alert id="alert1" path="path"/>
+                        <alert id="alert2" path="/path"/>
+                        <alert id="alert3" path="/path"/>
+                        <alert id="alert4" path="/path"/>
                     </alerts>
                 </configuration>
             </cib>
@@ -288,29 +291,80 @@ class RemoveAlertTest(TestCase):
             self.mock_log, self.mock_rep, cib_data=cib
         )
 
-    def test_success(self):
-        cmd_alert.remove_alert(self.mock_env, "alert")
+    def test_one_alert(self):
+        cmd_alert.remove_alert(self.mock_env, ["alert2"])
         assert_xml_equal(
             """
                 <cib validate-with="pacemaker-2.5">
                     <configuration>
                         <alerts>
-                            <alert id="alert-1" path="/path"/>
+                            <alert id="alert1" path="path"/>
+                            <alert id="alert3" path="/path"/>
+                            <alert id="alert4" path="/path"/>
                         </alerts>
                     </configuration>
                 </cib>
             """,
             self.mock_env._get_cib_xml()
         )
+        self.assertEqual([], self.mock_rep.report_item_list)
 
-    def test_not_existing_alert(self):
-        assert_raise_library_error(
-            lambda: cmd_alert.remove_alert(self.mock_env, "unknown"),
+    def test_multiple_alerts(self):
+        cmd_alert.remove_alert(self.mock_env, ["alert1", "alert3", "alert4"])
+        assert_xml_equal(
+            """
+                <cib validate-with="pacemaker-2.5">
+                    <configuration>
+                        <alerts>
+                            <alert id="alert2" path="/path"/>
+                        </alerts>
+                    </configuration>
+                </cib>
+            """,
+            self.mock_env._get_cib_xml()
+        )
+        self.assertEqual([], self.mock_rep.report_item_list)
+
+    def test_no_alert(self):
+        cmd_alert.remove_alert(self.mock_env, [])
+        assert_xml_equal(
+            """
+                <cib validate-with="pacemaker-2.5">
+                    <configuration>
+                        <alerts>
+                            <alert id="alert1" path="path"/>
+                            <alert id="alert2" path="/path"/>
+                            <alert id="alert3" path="/path"/>
+                            <alert id="alert4" path="/path"/>
+                        </alerts>
+                    </configuration>
+                </cib>
+            """,
+            self.mock_env._get_cib_xml()
+        )
+        self.assertEqual([], self.mock_rep.report_item_list)
+
+    def test_failure(self):
+        report_list = [
             (
                 Severities.ERROR,
                 report_codes.CIB_ALERT_NOT_FOUND,
                 {"alert": "unknown"}
+            ),
+            (
+                Severities.ERROR,
+                report_codes.CIB_ALERT_NOT_FOUND,
+                {"alert": "unknown2"}
             )
+        ]
+        assert_raise_library_error(
+            lambda: cmd_alert.remove_alert(
+                self.mock_env, ["unknown", "alert1", "unknown2", "alert2"]
+            ),
+            *report_list
+        )
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list, report_list
         )
 
 
@@ -614,8 +668,12 @@ class RemoveRecipientTest(TestCase):
                 <configuration>
                     <alerts>
                         <alert id="alert" path="path">
-                            <recipient id="alert-recipient" value="value1"/>
-                            <recipient id="alert-recipient-1" value="value"/>
+                            <recipient id="alert-recipient1" value="value1"/>
+                            <recipient id="alert-recipient2" value="value2"/>
+                        </alert>
+                        <alert id="alert2" path="path">
+                            <recipient id="alert2-recipient3" value="value3"/>
+                            <recipient id="alert2-recipient4" value="value4"/>
                         </alert>
                     </alerts>
                 </configuration>
@@ -626,26 +684,87 @@ class RemoveRecipientTest(TestCase):
         )
 
     def test_recipient_not_found(self):
-        assert_raise_library_error(
-            lambda: cmd_alert.remove_recipient(
-                self.mock_env, "recipient"
-            ),
+        report_list = [
             (
                 Severities.ERROR,
                 report_codes.ID_NOT_FOUND,
                 {"id": "recipient"}
+            ),
+            (
+                Severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {"id": "alert2-recipient1"}
             )
+        ]
+        assert_raise_library_error(
+            lambda: cmd_alert.remove_recipient(
+                self.mock_env,
+                ["recipient", "alert-recipient1", "alert2-recipient1"]
+            ),
+            *report_list
+        )
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list, report_list
         )
 
-    def test_success(self):
-        cmd_alert.remove_recipient(self.mock_env, "alert-recipient")
+    def test_one_recipient(self):
+        cmd_alert.remove_recipient(self.mock_env, ["alert-recipient1"])
         assert_xml_equal(
             """
             <cib validate-with="pacemaker-2.5">
                 <configuration>
                     <alerts>
                         <alert id="alert" path="path">
-                            <recipient id="alert-recipient-1" value="value"/>
+                            <recipient id="alert-recipient2" value="value2"/>
+                        </alert>
+                        <alert id="alert2" path="path">
+                            <recipient id="alert2-recipient3" value="value3"/>
+                            <recipient id="alert2-recipient4" value="value4"/>
+                        </alert>
+                    </alerts>
+                </configuration>
+            </cib>
+            """,
+            self.mock_env._get_cib_xml()
+        )
+        self.assertEqual([], self.mock_rep.report_item_list)
+
+    def test_multiple_recipients(self):
+        cmd_alert.remove_recipient(
+            self.mock_env,
+            ["alert-recipient1", "alert-recipient2", "alert2-recipient4"]
+        )
+        assert_xml_equal(
+            """
+            <cib validate-with="pacemaker-2.5">
+                <configuration>
+                    <alerts>
+                        <alert id="alert" path="path"/>
+                        <alert id="alert2" path="path">
+                            <recipient id="alert2-recipient3" value="value3"/>
+                        </alert>
+                    </alerts>
+                </configuration>
+            </cib>
+            """,
+            self.mock_env._get_cib_xml()
+        )
+        self.assertEqual([], self.mock_rep.report_item_list)
+
+    def test_no_recipient(self):
+        cmd_alert.remove_recipient(self.mock_env, [])
+        assert_xml_equal(
+            """
+            <cib validate-with="pacemaker-2.5">
+                <configuration>
+                    <alerts>
+                        <alert id="alert" path="path">
+                            <recipient id="alert-recipient1" value="value1"/>
+                            <recipient id="alert-recipient2" value="value2"/>
+                        </alert>
+                        <alert id="alert2" path="path">
+                            <recipient id="alert2-recipient3" value="value3"/>
+                            <recipient id="alert2-recipient4" value="value4"/>
                         </alert>
                     </alerts>
                 </configuration>
@@ -653,6 +772,7 @@ class RemoveRecipientTest(TestCase):
             """,
             self.mock_env._get_cib_xml()
         )
+        self.assertEqual([], self.mock_rep.report_item_list)
 
 
 @mock.patch("pcs.lib.cib.alert.get_all_alerts")
diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py
index 6bcab2b..c24eea2 100644
--- a/pcs/lib/commands/test/test_booth.py
+++ b/pcs/lib/commands/test/test_booth.py
@@ -426,7 +426,6 @@ class PullConfigTest(TestCase):
                     report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                     {
                         "node": None,
-                        "name": "name",
                         "name_list": ["name"]
                     }
                 )
@@ -467,7 +466,6 @@ class PullConfigTest(TestCase):
                     report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
                     {
                         "node": None,
-                        "name": "name",
                         "name_list": ["name"]
                     }
                 )
diff --git a/pcs/lib/commands/test/test_resource_agent.py b/pcs/lib/commands/test/test_resource_agent.py
new file mode 100644
index 0000000..9652591
--- /dev/null
+++ b/pcs/lib/commands/test/test_resource_agent.py
@@ -0,0 +1,362 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import logging
+from lxml import etree
+
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_unittest import mock, TestCase
+
+from pcs.common import report_codes
+from pcs.lib import resource_agent as lib_ra
+from pcs.lib.env import LibraryEnvironment
+from pcs.lib.errors import ReportItemSeverity as severity
+
+from pcs.lib.commands import resource_agent as lib
+
+
+@mock.patch("pcs.lib.resource_agent.list_resource_agents_standards")
+@mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class TestListStandards(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+
+    def test_success(self, mock_list_standards):
+        standards = [
+            "lsb",
+            "nagios",
+            "ocf",
+            "service",
+            "systemd",
+        ]
+        mock_list_standards.return_value = standards
+
+        self.assertEqual(
+            lib.list_standards(self.lib_env),
+            standards
+        )
+
+        mock_list_standards.assert_called_once_with("mock_runner")
+
+
+@mock.patch("pcs.lib.resource_agent.list_resource_agents_ocf_providers")
+@mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class TestListOcfProviders(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+
+    def test_success(self, mock_list_providers):
+        providers = [
+            "booth",
+            "heartbeat",
+            "openstack",
+            "pacemaker",
+        ]
+        mock_list_providers.return_value = providers
+
+        self.assertEqual(
+            lib.list_ocf_providers(self.lib_env),
+            providers
+        )
+
+        mock_list_providers.assert_called_once_with("mock_runner")
+
+
+@mock.patch("pcs.lib.resource_agent.list_resource_agents_standards")
+@mock.patch("pcs.lib.resource_agent.list_resource_agents")
+@mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class TestListAgentsForStandardAndProvider(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+
+    def test_standard_specified(self, mock_list_agents, mock_list_standards):
+        agents = [
+            "Delay",
+            "Dummy",
+            "Stateful",
+        ]
+        mock_list_agents.return_value = agents
+
+        self.assertEqual(
+            lib.list_agents_for_standard_and_provider(self.lib_env, "ocf:test"),
+            agents
+        )
+
+        mock_list_agents.assert_called_once_with("mock_runner", "ocf:test")
+        mock_list_standards.assert_not_called()
+
+
+    def test_standard_not_specified(
+        self, mock_list_agents, mock_list_standards
+    ):
+        agents_ocf = [
+            "Delay",
+            "Dummy",
+            "Stateful",
+        ]
+        agents_service = [
+            "corosync",
+            "pacemaker",
+            "pcsd",
+        ]
+        mock_list_standards.return_value = ["ocf:test", "service"]
+        mock_list_agents.side_effect = [agents_ocf, agents_service]
+
+        self.assertEqual(
+            lib.list_agents_for_standard_and_provider(self.lib_env),
+            sorted(agents_ocf + agents_service, key=lambda x: x.lower())
+        )
+
+        mock_list_standards.assert_called_once_with("mock_runner")
+        self.assertEqual(2, len(mock_list_agents.mock_calls))
+        mock_list_agents.assert_has_calls([
+            mock.call("mock_runner", "ocf:test"),
+            mock.call("mock_runner", "service"),
+        ])
+
+
+@mock.patch(
+    "pcs.lib.resource_agent.list_resource_agents_standards_and_providers",
+    lambda runner: ["service", "ocf:test"]
+)
+@mock.patch(
+    "pcs.lib.resource_agent.list_resource_agents",
+    lambda runner, standard: {
+        "ocf:test": [
+            "Stateful",
+            "Delay",
+        ],
+        "service": [
+            "corosync",
+            "pacemaker_remote",
+        ],
+    }.get(standard, [])
+)
+@mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class TestListAgents(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+
+    def test_list_all(self):
+        self.assertEqual(
+            lib.list_agents(self.lib_env, False, None),
+            [
+                {
+                    "name": "ocf:test:Delay",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "ocf:test:Stateful",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "service:corosync",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "service:pacemaker_remote",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+            ]
+        )
+
+
+    def test_search(self):
+        self.assertEqual(
+            lib.list_agents(self.lib_env, False, "te"),
+            [
+                {
+                    "name": "ocf:test:Delay",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "ocf:test:Stateful",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "service:pacemaker_remote",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+            ]
+        )
+
+
+    @mock.patch.object(lib_ra.Agent, "_get_metadata", autospec=True)
+    def test_describe(self, mock_metadata):
+        def mock_metadata_func(self):
+            if self._full_agent_name == "ocf:test:Stateful":
+                raise lib_ra.UnableToGetAgentMetadata(
+                    self._full_agent_name,
+                    "test exception"
+                )
+            return etree.XML("""
+                <resource-agent>
+                    <shortdesc>short {name}</shortdesc>
+                    <longdesc>long {name}</longdesc>
+                    <parameters>
+                    </parameters>
+                    <actions>
+                    </actions>
+                </resource-agent>
+            """.format(name=self._full_agent_name))
+        mock_metadata.side_effect = mock_metadata_func
+
+        # Stateful is missing as it does not provide valid metadata - see above
+        self.assertEqual(
+            lib.list_agents(self.lib_env, True, None),
+            [
+                {
+                    "name": "ocf:test:Delay",
+                    "shortdesc": "short ocf:test:Delay",
+                    "longdesc": "long ocf:test:Delay",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "service:corosync",
+                    "shortdesc": "short service:corosync",
+                    "longdesc": "long service:corosync",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "service:pacemaker_remote",
+                    "shortdesc": "short service:pacemaker_remote",
+                    "longdesc": "long service:pacemaker_remote",
+                    "parameters": [],
+                    "actions": [],
+                },
+            ]
+        )
+
+
+@mock.patch.object(lib_ra.ResourceAgent, "_load_metadata", autospec=True)
+@mock.patch("pcs.lib.resource_agent.guess_exactly_one_resource_agent_full_name")
+@mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class TestDescribeAgent(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.metadata = """
+            <resource-agent>
+                <shortdesc>short desc</shortdesc>
+                <longdesc>long desc</longdesc>
+                <parameters>
+                </parameters>
+                <actions>
+                </actions>
+            </resource-agent>
+        """
+        self.description = {
+            "name": "ocf:test:Dummy",
+            "shortdesc": "short desc",
+            "longdesc": "long desc",
+            "parameters": [],
+            "actions": [],
+        }
+
+
+    def test_full_name_success(self, mock_guess, mock_metadata):
+        mock_metadata.return_value = self.metadata
+
+        self.assertEqual(
+            lib.describe_agent(self.lib_env, "ocf:test:Dummy"),
+            self.description
+        )
+
+        self.assertEqual(len(mock_metadata.mock_calls), 1)
+        mock_guess.assert_not_called()
+
+
+    def test_guess_success(self, mock_guess, mock_metadata):
+        mock_metadata.return_value = self.metadata
+        mock_guess.return_value = lib_ra.ResourceAgent(
+            self.lib_env.cmd_runner(),
+            "ocf:test:Dummy"
+        )
+
+        self.assertEqual(
+            lib.describe_agent(self.lib_env, "dummy"),
+            self.description
+        )
+
+        self.assertEqual(len(mock_metadata.mock_calls), 1)
+        mock_guess.assert_called_once_with("mock_runner", "dummy")
+
+
+    def test_full_name_fail(self, mock_guess, mock_metadata):
+        mock_metadata.return_value = "invalid xml"
+
+        assert_raise_library_error(
+            lambda: lib.describe_agent(self.lib_env, "ocf:test:Dummy"),
+            (
+                severity.ERROR,
+                report_codes.UNABLE_TO_GET_AGENT_METADATA,
+                {
+                    "agent": "ocf:test:Dummy",
+                    "reason": "Start tag expected, '<' not found, line 1, column 1",
+                }
+            )
+        )
+
+        self.assertEqual(len(mock_metadata.mock_calls), 1)
+        mock_guess.assert_not_called()
diff --git a/pcs/lib/commands/test/test_stonith_agent.py b/pcs/lib/commands/test/test_stonith_agent.py
new file mode 100644
index 0000000..eaf5f93
--- /dev/null
+++ b/pcs/lib/commands/test/test_stonith_agent.py
@@ -0,0 +1,212 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import logging
+from lxml import etree
+
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_unittest import mock, TestCase
+
+from pcs.common import report_codes
+from pcs.lib import resource_agent as lib_ra
+from pcs.lib.env import LibraryEnvironment
+from pcs.lib.errors import ReportItemSeverity as severity
+
+from pcs.lib.commands import stonith_agent as lib
+
+
+ at mock.patch(
+    "pcs.lib.resource_agent.list_stonith_agents",
+    lambda runner: [
+        "fence_apc",
+        "fence_dummy",
+        "fence_xvm",
+    ]
+)
+ at mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class TestListAgents(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+
+    def tearDown(self):
+        lib_ra.StonithAgent._stonithd_metadata = None
+
+
+    def test_list_all(self):
+        self.assertEqual(
+            lib.list_agents(self.lib_env, False, None),
+            [
+                {
+                    "name": "fence_apc",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "fence_dummy",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "fence_xvm",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+            ]
+        )
+
+
+    def test_search(self):
+        self.assertEqual(
+            lib.list_agents(self.lib_env, False, "M"),
+            [
+                {
+                    "name": "fence_dummy",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "fence_xvm",
+                    "shortdesc": "",
+                    "longdesc": "",
+                    "parameters": [],
+                    "actions": [],
+                },
+            ]
+        )
+
+
+    @mock.patch.object(lib_ra.Agent, "_get_metadata", autospec=True)
+    def test_describe(self, mock_metadata):
+        def mock_metadata_func(self):
+            if self._full_agent_name == "ocf:test:Stateful":
+                raise lib_ra.UnableToGetAgentMetadata(
+                    self._full_agent_name,
+                    "test exception"
+                )
+            return etree.XML("""
+                <resource-agent>
+                    <shortdesc>short {name}</shortdesc>
+                    <longdesc>long {name}</longdesc>
+                    <parameters>
+                    </parameters>
+                    <actions>
+                    </actions>
+                </resource-agent>
+            """.format(name=self._full_agent_name))
+        mock_metadata.side_effect = mock_metadata_func
+
+        # Stateful is missing as it does not provide valid metadata - see above
+        self.assertEqual(
+            lib.list_agents(self.lib_env, True, None),
+            [
+                {
+                    "name": "fence_apc",
+                    "shortdesc": "short stonith:fence_apc",
+                    "longdesc": "long stonith:fence_apc",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "fence_dummy",
+                    "shortdesc": "short stonith:fence_dummy",
+                    "longdesc": "long stonith:fence_dummy",
+                    "parameters": [],
+                    "actions": [],
+                },
+                {
+                    "name": "fence_xvm",
+                    "shortdesc": "short stonith:fence_xvm",
+                    "longdesc": "long stonith:fence_xvm",
+                    "parameters": [],
+                    "actions": [],
+                },
+            ]
+        )
+
+
+ at mock.patch.object(lib_ra.StonithAgent, "_load_metadata", autospec=True)
+ at mock.patch.object(
+    lib_ra.StonithdMetadata,
+    "get_parameters",
+    lambda self: []
+)
+ at mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class TestDescribeAgent(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.metadata = """
+            <resource-agent>
+                <shortdesc>short desc</shortdesc>
+                <longdesc>long desc</longdesc>
+                <parameters>
+                </parameters>
+                <actions>
+                </actions>
+            </resource-agent>
+        """
+        self.description = {
+            "name": "fence_dummy",
+            "shortdesc": "short desc",
+            "longdesc": "long desc",
+            "parameters": [],
+            "actions": [],
+        }
+
+
+    def tearDown(self):
+        lib_ra.StonithAgent._stonithd_metadata = None
+
+
+    def test_success(self, mock_metadata):
+        mock_metadata.return_value = self.metadata
+
+        self.assertEqual(
+            lib.describe_agent(self.lib_env, "fence_dummy"),
+            self.description
+        )
+
+        self.assertEqual(len(mock_metadata.mock_calls), 1)
+
+
+    def test_fail(self, mock_metadata):
+        mock_metadata.return_value = "invalid xml"
+
+        assert_raise_library_error(
+            lambda: lib.describe_agent(self.lib_env, "fence_dummy"),
+            (
+                severity.ERROR,
+                report_codes.UNABLE_TO_GET_AGENT_METADATA,
+                {
+                    "agent": "fence_dummy",
+                    "reason": "Start tag expected, '<' not found, line 1, column 1",
+                }
+            )
+        )
+
+        self.assertEqual(len(mock_metadata.mock_calls), 1)
diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py
index 67aa0e4..6030375 100644
--- a/pcs/lib/corosync/live.py
+++ b/pcs/lib/corosync/live.py
@@ -20,9 +20,21 @@ def get_local_corosync_conf():
     path = settings.corosync_conf_file
     try:
         return open(path).read()
-    except IOError as e:
+    except EnvironmentError as e:
         raise LibraryError(reports.corosync_config_read_error(path, e.strerror))
 
+
+def get_local_cluster_conf():
+    """
+    Read cluster.conf file from local machine
+    """
+    path = settings.cluster_conf_file
+    try:
+        return open(path).read()
+    except EnvironmentError as e:
+        raise LibraryError(reports.cluster_conf_read_error(path, e.strerror))
+
+
 def exists_local_corosync_conf():
     return os.path.exists(settings.corosync_conf_file)
 
diff --git a/pcs/lib/env.py b/pcs/lib/env.py
index b139c58..f453be6 100644
--- a/pcs/lib/env.py
+++ b/pcs/lib/env.py
@@ -13,10 +13,12 @@ from pcs import settings
 from pcs.lib import reports
 from pcs.lib.booth.env import BoothEnv
 from pcs.lib.cib.tools import ensure_cib_version
+from pcs.lib.cluster_conf_facade import ClusterConfFacade
 from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
 from pcs.lib.corosync.live import (
     exists_local_corosync_conf,
     get_local_corosync_conf,
+    get_local_cluster_conf,
     reload_config as reload_corosync_config,
 )
 from pcs.lib.external import (
@@ -51,6 +53,7 @@ class LibraryEnvironment(object):
         corosync_conf_data=None,
         booth=None,
         auth_tokens_getter=None,
+        cluster_conf_data=None,
     ):
         self._logger = logger
         self._report_processor = report_processor
@@ -58,6 +61,7 @@ class LibraryEnvironment(object):
         self._user_groups = [] if user_groups is None else user_groups
         self._cib_data = cib_data
         self._corosync_conf_data = corosync_conf_data
+        self._cluster_conf_data = cluster_conf_data
         self._booth = (
             BoothEnv(report_processor, booth) if booth is not None else None
         )
@@ -179,6 +183,23 @@ class LibraryEnvironment(object):
         else:
             self._corosync_conf_data = corosync_conf_data
 
+
+    def get_cluster_conf_data(self):
+        if self.is_cluster_conf_live:
+            return get_local_cluster_conf()
+        else:
+            return self._cluster_conf_data
+
+
+    def get_cluster_conf(self):
+        return ClusterConfFacade.from_string(self.get_cluster_conf_data())
+
+
+    @property
+    def is_cluster_conf_live(self):
+        return self._cluster_conf_data is None
+
+
     def is_node_in_cluster(self):
         if self.is_cman_cluster:
             #TODO --cluster_conf is not propagated here. So no live check not
@@ -202,7 +223,10 @@ class LibraryEnvironment(object):
         return self._corosync_conf_data is None
 
     def cmd_runner(self):
-        runner_env = dict()
+        runner_env = {
+            # make sure to get output of external processes in English and ASCII
+            "LC_ALL": "C",
+        }
         if self.user_login:
             runner_env["CIB_user"] = self.user_login
         return CommandRunner(self.logger, self.report_processor, runner_env)
diff --git a/pcs/lib/errors.py b/pcs/lib/errors.py
index 0a8f4fa..c4263f0 100644
--- a/pcs/lib/errors.py
+++ b/pcs/lib/errors.py
@@ -30,30 +30,28 @@ class ReportItemSeverity(object):
 
 class ReportItem(object):
     @classmethod
-    def error(cls, code, message_pattern, **kwargs):
-        return cls(code, ReportItemSeverity.ERROR, message_pattern, **kwargs)
+    def error(cls, code, **kwargs):
+        return cls(code, ReportItemSeverity.ERROR, **kwargs)
 
     @classmethod
-    def warning(cls, code, message_pattern, **kwargs):
-        return cls(code, ReportItemSeverity.WARNING, message_pattern, **kwargs)
+    def warning(cls, code, **kwargs):
+        return cls(code, ReportItemSeverity.WARNING, **kwargs)
 
     @classmethod
-    def info(cls, code, message_pattern, **kwargs):
-        return cls(code, ReportItemSeverity.INFO, message_pattern, **kwargs)
+    def info(cls, code, **kwargs):
+        return cls(code, ReportItemSeverity.INFO, **kwargs)
 
     @classmethod
-    def debug(cls, code, message_pattern, **kwargs):
-        return cls(code, ReportItemSeverity.DEBUG, message_pattern, **kwargs)
+    def debug(cls, code, **kwargs):
+        return cls(code, ReportItemSeverity.DEBUG, **kwargs)
 
     def __init__(
-        self, code, severity, message_pattern, forceable=None, info=None
+        self, code, severity, forceable=None, info=None
     ):
         self.code = code
         self.severity = severity
         self.forceable = forceable
-        self.message_pattern=message_pattern
         self.info = info if info else dict()
-        self.message = self.message_pattern.format(**self.info)
 
     def __repr__(self):
         return "{severity} {code}: {info}".format(
diff --git a/pcs/lib/external.py b/pcs/lib/external.py
index 074d2aa..160586a 100644
--- a/pcs/lib/external.py
+++ b/pcs/lib/external.py
@@ -58,6 +58,11 @@ from pcs.lib import reports
 from pcs.lib.errors import LibraryError, ReportItemSeverity
 
 
+
+_chkconfig = settings.chkconfig_binary
+_service = settings.service_binary
+_systemctl = settings.systemctl_binary
+
 class ManageServiceError(Exception):
     #pylint: disable=super-init-not-called
     def __init__(self, service, message=None, instance=None):
@@ -81,10 +86,6 @@ class KillServicesError(ManageServiceError):
     pass
 
 
-def is_path_runnable(path):
-    return os.path.isfile(path) and os.access(path, os.X_OK)
-
-
 def is_dir_nonempty(path):
     if not os.path.exists(path):
         return False
@@ -119,6 +120,7 @@ def is_systemctl():
         '/usr/bin/systemctl',
         '/bin/systemctl',
         '/var/run/systemd/system',
+        '/run/systemd/system',
     ]
     for path in systemctl_paths:
         if os.path.exists(path):
@@ -140,10 +142,10 @@ def disable_service(runner, service, instance=None):
         return
     if is_systemctl():
         stdout, stderr, retval = runner.run([
-            "systemctl", "disable", _get_service_name(service, instance)
+            _systemctl, "disable", _get_service_name(service, instance)
         ])
     else:
-        stdout, stderr, retval = runner.run(["chkconfig", service, "off"])
+        stdout, stderr, retval = runner.run([_chkconfig, service, "off"])
     if retval != 0:
         raise DisableServiceError(
             service,
@@ -164,10 +166,10 @@ def enable_service(runner, service, instance=None):
     """
     if is_systemctl():
         stdout, stderr, retval = runner.run([
-            "systemctl", "enable", _get_service_name(service, instance)
+            _systemctl, "enable", _get_service_name(service, instance)
         ])
     else:
-        stdout, stderr, retval = runner.run(["chkconfig", service, "on"])
+        stdout, stderr, retval = runner.run([_chkconfig, service, "on"])
     if retval != 0:
         raise EnableServiceError(
             service,
@@ -186,10 +188,10 @@ def start_service(runner, service, instance=None):
     """
     if is_systemctl():
         stdout, stderr, retval = runner.run([
-            "systemctl", "start", _get_service_name(service, instance)
+            _systemctl, "start", _get_service_name(service, instance)
         ])
     else:
-        stdout, stderr, retval = runner.run(["service", service, "start"])
+        stdout, stderr, retval = runner.run([_service, service, "start"])
     if retval != 0:
         raise StartServiceError(
             service,
@@ -208,10 +210,10 @@ def stop_service(runner, service, instance=None):
     """
     if is_systemctl():
         stdout, stderr, retval = runner.run([
-            "systemctl", "stop", _get_service_name(service, instance)
+            _systemctl, "stop", _get_service_name(service, instance)
         ])
     else:
-        stdout, stderr, retval = runner.run(["service", service, "stop"])
+        stdout, stderr, retval = runner.run([_service, service, "stop"])
     if retval != 0:
         raise StopServiceError(
             service,
@@ -248,10 +250,10 @@ def is_service_enabled(runner, service, instance=None):
     """
     if is_systemctl():
         dummy_stdout, dummy_stderr, retval = runner.run(
-            ["systemctl", "is-enabled", _get_service_name(service, instance)]
+            [_systemctl, "is-enabled", _get_service_name(service, instance)]
         )
     else:
-        dummy_stdout, dummy_stderr, retval = runner.run(["chkconfig", service])
+        dummy_stdout, dummy_stderr, retval = runner.run([_chkconfig, service])
 
     return retval == 0
 
@@ -265,13 +267,13 @@ def is_service_running(runner, service, instance=None):
     """
     if is_systemctl():
         dummy_stdout, dummy_stderr, retval = runner.run([
-            "systemctl",
+            _systemctl,
             "is-active",
             _get_service_name(service, instance)
         ])
     else:
         dummy_stdout, dummy_stderr, retval = runner.run(
-            ["service", service, "status"]
+            [_service, service, "status"]
         )
 
     return retval == 0
@@ -299,7 +301,7 @@ def get_non_systemd_services(runner):
     if is_systemctl():
         return []
 
-    stdout, dummy_stderr, return_code = runner.run(["chkconfig"])
+    stdout, dummy_stderr, return_code = runner.run([_chkconfig])
     if return_code != 0:
         return []
 
@@ -321,7 +323,7 @@ def get_systemd_services(runner):
         return []
 
     stdout, dummy_stderr, return_code = runner.run([
-        "systemctl", "list-unit-files", "--full"
+        _systemctl, "list-unit-files", "--full"
     ])
     if return_code != 0:
         return []
@@ -358,17 +360,25 @@ class CommandRunner(object):
     def __init__(self, logger, reporter, env_vars=None):
         self._logger = logger
         self._reporter = reporter
+        # Reset environment variables by empty dict is desired here.  We need
+        # to get rid of defaults - we do not know the context and environment
+        # where the library runs.  We also get rid of PATH settings, so all
+        # executables must be specified with full path unless the PATH variable
+        # is set from outside.
         self._env_vars = env_vars if env_vars else dict()
         self._python2 = sys.version[0] == "2"
 
     def run(
         self, args, stdin_string=None, env_extend=None, binary_output=False
     ):
-        #Reset environment variables by empty dict is desired here.  We need to
-        #get rid of defaults - we do not know the context and environment of the
-        #library.  So executable must be specified with full path.
-        env_vars = dict(env_extend) if env_extend else dict()
-        env_vars.update(self._env_vars)
+        # Allow overriding default settings. If a piece of code really wants to
+        # set own PATH or CIB_file, we must allow it. I.e. it wants to run
+        # a pacemaker tool on a CIB in a file but cannot afford the risk of
+        # changing the CIB in the file specified by the user.
+        env_vars = self._env_vars
+        env_vars.update(
+            dict(env_extend) if env_extend else dict()
+        )
 
         log_args = " ".join([shell_quote(x) for x in args])
         msg = "Running: {args}"
diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
index cff491c..b8e53b4 100644
--- a/pcs/lib/reports.py
+++ b/pcs/lib/reports.py
@@ -5,8 +5,6 @@ from __future__ import (
     unicode_literals,
 )
 
-from collections import Iterable
-
 from pcs.common import report_codes
 from pcs.lib.errors import ReportItem, ReportItemSeverity
 
@@ -18,7 +16,6 @@ def common_error(text):
     """
     return ReportItem.error(
         report_codes.COMMON_ERROR,
-        "{text}",
         info={"text": text}
     )
 
@@ -29,7 +26,6 @@ def common_info(text):
     """
     return ReportItem.info(
         report_codes.COMMON_INFO,
-        "{text}",
         info={"text": text}
     )
 
@@ -45,20 +41,9 @@ def resource_for_constraint_is_multiinstance(
     severity report item severity
     forceable is this report item forceable? by what cathegory?
     """
-    template = (
-        "{resource_id} is a clone resource, you should use the"
-        + " clone id: {parent_id} when adding constraints"
-    )
-    if parent_type == "master":
-        template = (
-            "{resource_id} is a master/slave resource, you should use the"
-            + " master id: {parent_id} when adding constraints"
-        )
-
     return ReportItem(
         report_codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE,
         severity,
-        template,
         info={
             "resource_id": resource_id,
             "parent_type": parent_type,
@@ -81,7 +66,6 @@ def duplicate_constraints_exist(
     return ReportItem(
         report_codes.DUPLICATE_CONSTRAINTS_EXIST,
         severity,
-        "duplicate constraint already exists",
         info={
             "constraint_type": constraint_type,
             "constraint_info_list": constraint_info_list,
@@ -95,7 +79,6 @@ def empty_resource_set_list():
     """
     return ReportItem.error(
         report_codes.EMPTY_RESOURCE_SET_LIST,
-        "Resource set list is empty",
     )
 
 def required_option_is_missing(name):
@@ -104,7 +87,6 @@ def required_option_is_missing(name):
     """
     return ReportItem.error(
         report_codes.REQUIRED_OPTION_IS_MISSING,
-        "required option '{option_name}' is missing",
         info={
             "option_name": name
         }
@@ -122,18 +104,15 @@ def invalid_option(
     severity report item severity
     forceable is this report item forceable? by what cathegory?
     """
-    msg = "invalid option '{option_name}', allowed options are: {allowed_str}"
-    info = {
-        "option_name": option_name,
-        "option_type": option_type,
-        "allowed": sorted(allowed_options),
-        "allowed_str": ", ".join(sorted(allowed_options)),
-    }
-    if option_type:
-        msg = ("invalid {option_type} option '{option_name}'"
-            + ", allowed options are: {allowed_str}")
     return ReportItem(
-        report_codes.INVALID_OPTION, severity, msg, forceable, info
+        report_codes.INVALID_OPTION,
+        severity,
+        forceable,
+        info={
+            "option_name": option_name,
+            "option_type": option_type,
+            "allowed": sorted(allowed_options),
+        }
     )
 
 def invalid_option_value(
@@ -148,23 +127,13 @@ def invalid_option_value(
     severity report item severity
     forceable is this report item forceable? by what cathegory?
     """
-    allowed_iterable = (
-        isinstance(allowed_values, Iterable)
-        and
-        not isinstance(allowed_values, "".__class__)
-    )
-    allowed_str = (", ".join(allowed_values) if allowed_iterable
-        else allowed_values)
     return ReportItem(
         report_codes.INVALID_OPTION_VALUE,
         severity,
-        "'{option_value}' is not a valid {option_name} value"
-            + ", use {allowed_values_str}",
         info={
             "option_value": option_value,
             "option_name": option_name,
             "allowed_values": allowed_values,
-            "allowed_values_str": allowed_str,
         },
         forceable=forceable
     )
@@ -176,12 +145,10 @@ def invalid_id_is_empty(id, id_description):
     id_description string decribe id's role
     """
     return ReportItem.error(
-        report_codes.INVALID_ID,
-        "{id_description} cannot be empty",
+        report_codes.EMPTY_ID,
         info={
             "id": id,
             "id_description": id_description,
-            "reason": "empty",
         }
     )
 
@@ -195,16 +162,10 @@ def invalid_id_bad_char(id, id_description, bad_char, is_first_char):
     """
     return ReportItem.error(
         report_codes.INVALID_ID,
-        (
-            "invalid {{id_description}} '{{id}}', '{{invalid_character}}' "
-            + "is not a valid{0}character for a {{id_description}}"
-        ).format(" first " if is_first_char else " "),
         info={
             "id": id,
             "id_description": id_description,
-            "reason": "invalid{0}character".format(
-                " first " if is_first_char else " "
-            ),
+            "is_first_char": is_first_char,
             "invalid_character": bad_char,
         }
     )
@@ -216,7 +177,6 @@ def invalid_timeout(timeout):
     """
     return ReportItem.error(
         report_codes.INVALID_TIMEOUT_VALUE,
-        "'{timeout}' is not a valid number of seconds to wait",
         info={"timeout": timeout}
     )
 
@@ -227,7 +187,6 @@ def invalid_score(score):
     """
     return ReportItem.error(
         report_codes.INVALID_SCORE,
-        "invalid score '{score}', use integer or INFINITY or -INFINITY",
         info={
             "score": score,
         }
@@ -240,7 +199,6 @@ def multiple_score_options():
     """
     return ReportItem.error(
         report_codes.MULTIPLE_SCORE_OPTIONS,
-        "you cannot specify multiple score options",
     )
 
 def run_external_process_started(command, stdin):
@@ -249,13 +207,8 @@ def run_external_process_started(command, stdin):
     command string the external process command
     stdin string passed to the external process via its stdin
     """
-    msg = "Running: {command}"
-    if stdin:
-        msg += "\n--Debug Input Start--\n{stdin}\n--Debug Input End--"
-    msg += "\n"
     return ReportItem.debug(
         report_codes.RUN_EXTERNAL_PROCESS_STARTED,
-        msg,
         info={
             "command": command,
             "stdin": stdin,
@@ -272,9 +225,6 @@ def run_external_process_finished(command, retval, stdout, stderr):
     """
     return ReportItem.debug(
         report_codes.RUN_EXTERNAL_PROCESS_FINISHED,
-        "Finished running: {command}\nReturn value: {return_value}"
-        + "\n--Debug Stdout Start--\n{stdout}\n--Debug Stdout End--"
-        + "\n--Debug Stderr Start--\n{stderr}\n--Debug Stderr End--\n",
         info={
             "command": command,
             "return_value": retval,
@@ -291,7 +241,6 @@ def run_external_process_error(command, reason):
     """
     return ReportItem.error(
         report_codes.RUN_EXTERNAL_PROCESS_ERROR,
-        "unable to run command {command}: {reason}",
         info={
             "command": command,
             "reason": reason
@@ -304,13 +253,8 @@ def node_communication_started(target, data):
     target string where the request is about to be sent to
     data string request's data
     """
-    msg = "Sending HTTP Request to: {target}"
-    if data:
-        msg += "\n--Debug Input Start--\n{data}\n--Debug Input End--"
-    msg += "\n"
     return ReportItem.debug(
         report_codes.NODE_COMMUNICATION_STARTED,
-        msg,
         info={
             "target": target,
             "data": data,
@@ -326,9 +270,6 @@ def node_communication_finished(target, retval, data):
     """
     return ReportItem.debug(
         report_codes.NODE_COMMUNICATION_FINISHED,
-        "Finished calling: {target}\nResponse Code: {response_code}"
-        + "\n--Debug Response Start--\n{response_data}\n--Debug Response End--"
-        + "\n",
         info={
             "target": target,
             "response_code": retval,
@@ -344,7 +285,6 @@ def node_communication_not_connected(node, reason):
     """
     return ReportItem.debug(
         report_codes.NODE_COMMUNICATION_NOT_CONNECTED,
-        "Unable to connect to {node} ({reason})",
         info={
             "node": node,
             "reason": reason,
@@ -363,7 +303,6 @@ def node_communication_error_not_authorized(
     return ReportItem(
         report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
         severity,
-        "Unable to authenticate to {node} ({reason}), try running 'pcs cluster auth'",
         info={
             "node": node,
             "command": command,
@@ -384,7 +323,6 @@ def node_communication_error_permission_denied(
     return ReportItem(
         report_codes.NODE_COMMUNICATION_ERROR_PERMISSION_DENIED,
         severity,
-        "{node}: Permission denied ({reason})",
         info={
             "node": node,
             "command": command,
@@ -405,7 +343,6 @@ def node_communication_error_unsupported_command(
     return ReportItem(
         report_codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND,
         severity,
-        "{node}: Unsupported command ({reason}), try upgrading pcsd",
         info={
             "node": node,
             "command": command,
@@ -422,7 +359,6 @@ def node_communication_command_unsuccessful(node, command, reason):
     """
     return ReportItem.error(
         report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
-        "{node}: {reason}",
         info={
             "node": node,
             "command": command,
@@ -442,7 +378,6 @@ def node_communication_error_other_error(
     return ReportItem(
         report_codes.NODE_COMMUNICATION_ERROR,
         severity,
-        "Error connecting to {node} ({reason})",
         info={
             "node": node,
             "command": command,
@@ -463,7 +398,6 @@ def node_communication_error_unable_to_connect(
     return ReportItem(
         report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
         severity,
-        "Unable to connect to {node} ({reason})",
         info={
             "node": node,
             "command": command,
@@ -478,7 +412,6 @@ def corosync_config_distribution_started():
     """
     return ReportItem.info(
         report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED,
-        "Sending updated corosync.conf to nodes..."
     )
 
 def corosync_config_accepted_by_node(node):
@@ -488,7 +421,6 @@ def corosync_config_accepted_by_node(node):
     """
     return ReportItem.info(
         report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
-        "{node}: Succeeded",
         info={"node": node}
     )
 
@@ -503,7 +435,6 @@ def corosync_config_distribution_node_error(
     return ReportItem(
         report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
         severity,
-        "{node}: Unable to set corosync config",
         info={"node": node},
         forceable=forceable
     )
@@ -514,7 +445,6 @@ def corosync_not_running_check_started():
     """
     return ReportItem.info(
         report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED,
-        "Checking corosync is not running on nodes..."
     )
 
 def corosync_not_running_check_node_error(
@@ -528,7 +458,6 @@ def corosync_not_running_check_node_error(
     return ReportItem(
         report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
         severity,
-        "{node}: Unable to check if corosync is not running",
         info={"node": node},
         forceable=forceable
     )
@@ -540,7 +469,6 @@ def corosync_not_running_on_node_ok(node):
     """
     return ReportItem.info(
         report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
-        "{node}: corosync is not running",
         info={"node": node}
     )
 
@@ -551,7 +479,6 @@ def corosync_running_on_node_fail(node):
     """
     return ReportItem.error(
         report_codes.COROSYNC_RUNNING_ON_NODE,
-        "{node}: corosync is running",
         info={"node": node}
     )
 
@@ -562,7 +489,6 @@ def corosync_quorum_get_status_error(reason):
     """
     return ReportItem.error(
         report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR,
-        "Unable to get quorum status: {reason}",
         info={
             "reason": reason,
         }
@@ -575,7 +501,6 @@ def corosync_quorum_set_expected_votes_error(reason):
     """
     return ReportItem.error(
         report_codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR,
-        "Unable to set expected votes: {reason}",
         info={
             "reason": reason,
         }
@@ -587,7 +512,6 @@ def corosync_config_reloaded():
     """
     return ReportItem.info(
         report_codes.COROSYNC_CONFIG_RELOADED,
-        "Corosync configuration reloaded"
     )
 
 def corosync_config_reload_error(reason):
@@ -597,7 +521,6 @@ def corosync_config_reload_error(reason):
     """
     return ReportItem.error(
         report_codes.COROSYNC_CONFIG_RELOAD_ERROR,
-        "Unable to reload corosync configuration: {reason}",
         info={"reason": reason}
     )
 
@@ -608,7 +531,6 @@ def corosync_config_read_error(path, reason):
     """
     return ReportItem.error(
         report_codes.UNABLE_TO_READ_COROSYNC_CONFIG,
-        "Unable to read {path}: {reason}",
         info={
             "path": path,
             "reason": reason,
@@ -621,7 +543,6 @@ def corosync_config_parser_missing_closing_brace():
     """
     return ReportItem.error(
         report_codes.PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE,
-        "Unable to parse corosync config: missing closing brace"
     )
 
 def corosync_config_parser_unexpected_closing_brace():
@@ -630,7 +551,6 @@ def corosync_config_parser_unexpected_closing_brace():
     """
     return ReportItem.error(
         report_codes.PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE,
-        "Unable to parse corosync config: unexpected closing brace"
     )
 
 def corosync_config_parser_other_error():
@@ -640,7 +560,6 @@ def corosync_config_parser_other_error():
     """
     return ReportItem.error(
         report_codes.PARSE_ERROR_COROSYNC_CONF,
-        "Unable to parse corosync config"
     )
 
 def corosync_options_incompatible_with_qdevice(options):
@@ -650,11 +569,8 @@ def corosync_options_incompatible_with_qdevice(options):
     """
     return ReportItem.error(
         report_codes.COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE,
-        "These options cannot be set when the cluster uses a quorum device: "
-        + "{options_names_str}",
         info={
             "options_names": options,
-            "options_names_str": ", ".join(sorted(options)),
         }
     )
 
@@ -664,7 +580,6 @@ def qdevice_already_defined():
     """
     return ReportItem.error(
         report_codes.QDEVICE_ALREADY_DEFINED,
-        "quorum device is already defined"
     )
 
 def qdevice_not_defined():
@@ -673,7 +588,6 @@ def qdevice_not_defined():
     """
     return ReportItem.error(
         report_codes.QDEVICE_NOT_DEFINED,
-        "no quorum device is defined in this cluster"
     )
 
 def qdevice_remove_or_cluster_stop_needed():
@@ -682,7 +596,6 @@ def qdevice_remove_or_cluster_stop_needed():
     """
     return ReportItem.error(
         report_codes.QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED,
-        "You need to stop the cluster or remove qdevice from cluster to continue"
     )
 
 def qdevice_client_reload_started():
@@ -691,7 +604,6 @@ def qdevice_client_reload_started():
     """
     return ReportItem.info(
         report_codes.QDEVICE_CLIENT_RELOAD_STARTED,
-        "Reloading qdevice configuration on nodes..."
     )
 
 def qdevice_already_initialized(model):
@@ -701,7 +613,6 @@ def qdevice_already_initialized(model):
     """
     return ReportItem.error(
         report_codes.QDEVICE_ALREADY_INITIALIZED,
-        "Quorum device '{model}' has been already initialized",
         info={
             "model": model,
         }
@@ -714,7 +625,6 @@ def qdevice_not_initialized(model):
     """
     return ReportItem.error(
         report_codes.QDEVICE_NOT_INITIALIZED,
-        "Quorum device '{model}' has not been initialized yet",
         info={
             "model": model,
         }
@@ -727,7 +637,6 @@ def qdevice_initialization_success(model):
     """
     return ReportItem.info(
         report_codes.QDEVICE_INITIALIZATION_SUCCESS,
-        "Quorum device '{model}' initialized",
         info={
             "model": model,
         }
@@ -741,7 +650,6 @@ def qdevice_initialization_error(model, reason):
     """
     return ReportItem.error(
         report_codes.QDEVICE_INITIALIZATION_ERROR,
-        "Unable to initialize quorum device '{model}': {reason}",
         info={
             "model": model,
             "reason": reason,
@@ -754,7 +662,6 @@ def qdevice_certificate_distribution_started():
     """
     return ReportItem.info(
         report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-        "Setting up qdevice certificates on nodes..."
     )
 
 def qdevice_certificate_accepted_by_node(node):
@@ -764,7 +671,6 @@ def qdevice_certificate_accepted_by_node(node):
     """
     return ReportItem.info(
         report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-        "{node}: Succeeded",
         info={"node": node}
     )
 
@@ -774,7 +680,6 @@ def qdevice_certificate_removal_started():
     """
     return ReportItem.info(
         report_codes.QDEVICE_CERTIFICATE_REMOVAL_STARTED,
-        "Removing qdevice certificates from nodes..."
     )
 
 def qdevice_certificate_removed_from_node(node):
@@ -784,7 +689,6 @@ def qdevice_certificate_removed_from_node(node):
     """
     return ReportItem.info(
         report_codes.QDEVICE_CERTIFICATE_REMOVED_FROM_NODE,
-        "{node}: Succeeded",
         info={"node": node}
     )
 
@@ -795,7 +699,6 @@ def qdevice_certificate_import_error(reason):
     """
     return ReportItem.error(
         report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
-        "Unable to import quorum device certificate: {reason}",
         info={
             "reason": reason,
         }
@@ -808,7 +711,6 @@ def qdevice_certificate_sign_error(reason):
     """
     return ReportItem.error(
         report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR,
-        "Unable to sign quorum device certificate: {reason}",
         info={
             "reason": reason,
         }
@@ -821,7 +723,6 @@ def qdevice_destroy_success(model):
     """
     return ReportItem.info(
         report_codes.QDEVICE_DESTROY_SUCCESS,
-        "Quorum device '{model}' configuration files removed",
         info={
             "model": model,
         }
@@ -835,7 +736,6 @@ def qdevice_destroy_error(model, reason):
     """
     return ReportItem.error(
         report_codes.QDEVICE_DESTROY_ERROR,
-        "Unable to destroy quorum device '{model}': {reason}",
         info={
             "model": model,
             "reason": reason,
@@ -849,7 +749,6 @@ def qdevice_not_running(model):
     """
     return ReportItem.error(
         report_codes.QDEVICE_NOT_RUNNING,
-        "Quorum device '{model}' is not running",
         info={
             "model": model,
         }
@@ -863,7 +762,6 @@ def qdevice_get_status_error(model, reason):
     """
     return ReportItem.error(
         report_codes.QDEVICE_GET_STATUS_ERROR,
-        "Unable to get status of quorum device '{model}': {reason}",
         info={
             "model": model,
             "reason": reason,
@@ -879,10 +777,8 @@ def qdevice_used_by_clusters(
     return ReportItem(
         report_codes.QDEVICE_USED_BY_CLUSTERS,
         severity,
-        "Quorum device is currently being used by cluster(s): {clusters_str}",
         info={
             "clusters": clusters,
-            "clusters_str": ", ".join(clusters),
         },
         forceable=forceable
     )
@@ -893,7 +789,6 @@ def cman_unsupported_command():
     """
     return ReportItem.error(
         report_codes.CMAN_UNSUPPORTED_COMMAND,
-        "This command is not supported on CMAN clusters"
     )
 
 def id_already_exists(id):
@@ -903,7 +798,6 @@ def id_already_exists(id):
     """
     return ReportItem.error(
         report_codes.ID_ALREADY_EXISTS,
-        "'{id}' already exists",
         info={"id": id}
     )
 
@@ -916,7 +810,6 @@ def id_not_found(id, id_description):
     """
     return ReportItem.error(
         report_codes.ID_NOT_FOUND,
-        ("{id_description} " if id_description else "") + "'{id}' does not exist",
         info={
             "id": id,
             "id_description": id_description,
@@ -930,7 +823,6 @@ def resource_does_not_exist(resource_id):
     """
     return ReportItem.error(
         report_codes.RESOURCE_DOES_NOT_EXIST,
-        "Resource '{resource_id}' does not exist",
         info={
             "resource_id": resource_id,
         }
@@ -943,7 +835,6 @@ def cib_load_error(reason):
     """
     return ReportItem.error(
         report_codes.CIB_LOAD_ERROR,
-        "unable to get cib",
         info={
             "reason": reason,
         }
@@ -957,7 +848,6 @@ def cib_load_error_scope_missing(scope, reason):
     """
     return ReportItem.error(
         report_codes.CIB_LOAD_ERROR_SCOPE_MISSING,
-        "unable to get cib, scope '{scope}' not present in cib",
         info={
             "scope": scope,
             "reason": reason,
@@ -970,7 +860,6 @@ def cib_load_error_invalid_format():
     """
     return ReportItem.error(
         report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
-        "unable to get cib, xml does not conform to the schema"
     )
 
 def cib_missing_mandatory_section(section_name):
@@ -980,7 +869,6 @@ def cib_missing_mandatory_section(section_name):
     """
     return ReportItem.error(
         report_codes.CIB_CANNOT_FIND_MANDATORY_SECTION,
-        "Unable to get {section} section of cib",
         info={
             "section": section_name,
         }
@@ -994,7 +882,6 @@ def cib_push_error(reason, pushed_cib):
     """
     return ReportItem.error(
         report_codes.CIB_PUSH_ERROR,
-        "Unable to update cib\n{reason}\n{pushed_cib}",
         info={
             "reason": reason,
             "pushed_cib": pushed_cib,
@@ -1008,7 +895,6 @@ def cluster_state_cannot_load(reason):
     """
     return ReportItem.error(
         report_codes.CRM_MON_ERROR,
-        "error running crm_mon, is pacemaker running?",
         info={
             "reason": reason,
         }
@@ -1020,7 +906,6 @@ def cluster_state_invalid_format():
     """
     return ReportItem.error(
         report_codes.BAD_CLUSTER_STATE_FORMAT,
-        "cannot load cluster status, xml does not conform to the schema"
     )
 
 def resource_wait_not_supported():
@@ -1029,7 +914,6 @@ def resource_wait_not_supported():
     """
     return ReportItem.error(
         report_codes.RESOURCE_WAIT_NOT_SUPPORTED,
-        "crm_resource does not support --wait, please upgrade pacemaker"
     )
 
 def resource_wait_timed_out(reason):
@@ -1039,7 +923,6 @@ def resource_wait_timed_out(reason):
     """
     return ReportItem.error(
         report_codes.RESOURCE_WAIT_TIMED_OUT,
-        "waiting timeout\n\n{reason}",
         info={
             "reason": reason,
         }
@@ -1052,7 +935,6 @@ def resource_wait_error(reason):
     """
     return ReportItem.error(
         report_codes.RESOURCE_WAIT_ERROR,
-        "{reason}",
         info={
             "reason": reason,
         }
@@ -1065,15 +947,8 @@ def resource_cleanup_error(reason, resource=None, node=None):
     string resource resource which has been cleaned up
     string node node which has been cleaned up
     """
-    if resource:
-        text = "Unable to cleanup resource: {resource}\n{reason}"
-    else:
-        text = (
-            "Unexpected error occured. 'crm_resource -C' error:\n{reason}"
-        )
     return ReportItem.error(
         report_codes.RESOURCE_CLEANUP_ERROR,
-        text,
         info={
             "reason": reason,
             "resource": resource,
@@ -1088,11 +963,6 @@ def resource_cleanup_too_time_consuming(threshold):
     """
     return ReportItem.error(
         report_codes.RESOURCE_CLEANUP_TOO_TIME_CONSUMING,
-        "Cleaning up all resources on all nodes will execute more "
-            + "than {threshold} operations in the cluster, which may "
-            + "negatively impact the responsiveness of the cluster. "
-            + "Consider specifying resource and/or node"
-        ,
         info={"threshold": threshold},
         forceable=report_codes.FORCE_LOAD_THRESHOLD
     )
@@ -1104,7 +974,6 @@ def node_not_found(node):
     """
     return ReportItem.error(
         report_codes.NODE_NOT_FOUND,
-        "node '{node}' does not appear to exist in configuration",
         info={"node": node}
     )
 
@@ -1115,7 +984,6 @@ def pacemaker_local_node_name_not_found(reason):
     """
     return ReportItem.error(
         report_codes.PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND,
-        "unable to get local node name from pacemaker: {reason}",
         info={"reason": reason}
     )
 
@@ -1127,7 +995,6 @@ def rrp_active_not_supported(warning=False):
     return ReportItem(
         report_codes.RRP_ACTIVE_NOT_SUPPORTED,
         ReportItemSeverity.WARNING if warning else ReportItemSeverity.ERROR,
-        "using a RRP mode of 'active' is not supported or tested",
         forceable=(None if warning else report_codes.FORCE_ACTIVE_RRP)
     )
 
@@ -1138,19 +1005,15 @@ def cman_ignored_option(option):
     """
     return ReportItem.warning(
         report_codes.IGNORED_CMAN_UNSUPPORTED_OPTION,
-        '{option_name} ignored as it is not supported on CMAN clusters',
         info={'option_name': option}
     )
 
 def rrp_addresses_transport_mismatch():
-    # TODO this knows too much about cmdline and needs to be fixed once
-    # client code is moved to library, probably by CmdLineInputError in cli
     """
     RRP defined by network addresses is not allowed when udp transport is used
     """
     return ReportItem.error(
         report_codes.NON_UDP_TRANSPORT_ADDR_MISMATCH,
-        "--addr0 and --addr1 can only be used with --transport=udp"
     )
 
 def cman_udpu_restart_required():
@@ -1159,8 +1022,6 @@ def cman_udpu_restart_required():
     """
     return ReportItem.warning(
         report_codes.CMAN_UDPU_RESTART_REQUIRED,
-        "Using udpu transport on a CMAN cluster, "
-            + "cluster restart is required after node add or remove"
     )
 
 def cman_broadcast_all_rings():
@@ -1169,8 +1030,6 @@ def cman_broadcast_all_rings():
     """
     return ReportItem.warning(
         report_codes.CMAN_BROADCAST_ALL_RINGS,
-        "Enabling broadcast for all rings as CMAN does not support "
-            + "broadcast in only one ring"
     )
 
 def service_start_started(service, instance=None):
@@ -1179,13 +1038,8 @@ def service_start_started(service, instance=None):
     string service service name or description
     string instance instance of service
     """
-    if instance:
-        msg = "Starting {service}@{instance}..."
-    else:
-        msg = "Starting {service}..."
     return ReportItem.info(
         report_codes.SERVICE_START_STARTED,
-        msg,
         info={
             "service": service,
             "instance": instance,
@@ -1200,13 +1054,8 @@ def service_start_error(service, reason, node=None, instance=None):
     string node node on which service has been requested to start
     string instance instance of service
     """
-    if instance:
-        msg = "Unable to start {service}@{instance}: {reason}"
-    else:
-        msg = "Unable to start {service}: {reason}"
     return ReportItem.error(
         report_codes.SERVICE_START_ERROR,
-        msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "reason": reason,
@@ -1222,13 +1071,8 @@ def service_start_success(service, node=None, instance=None):
     string node node on which service has been requested to start
     string instance instance of service
     """
-    if instance:
-        msg = "{service}@{instance} started"
-    else:
-        msg = "{service} started"
     return ReportItem.info(
         report_codes.SERVICE_START_SUCCESS,
-        msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "node": node,
@@ -1244,13 +1088,8 @@ def service_start_skipped(service, reason, node=None, instance=None):
     string node node on which service has been requested to start
     string instance instance of service
     """
-    if instance:
-        msg = "not starting {service}@{instance} - {reason}"
-    else:
-        msg = "not starting {service} - {reason}"
     return ReportItem.info(
         report_codes.SERVICE_START_SKIPPED,
-        msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "reason": reason,
@@ -1265,13 +1104,8 @@ def service_stop_started(service, instance=None):
     string service service name or description
     string instance instance of service
     """
-    if instance:
-        msg = "Stopping {service}@{instance}..."
-    else:
-        msg = "Stopping {service}..."
     return ReportItem.info(
         report_codes.SERVICE_STOP_STARTED,
-        msg,
         info={
             "service": service,
             "instance": instance,
@@ -1286,13 +1120,8 @@ def service_stop_error(service, reason, node=None, instance=None):
     string node node on which service has been requested to stop
     string instance instance of service
     """
-    if instance:
-        msg = "Unable to stop {service}@{instance}: {reason}"
-    else:
-        msg = "Unable to stop {service}: {reason}"
     return ReportItem.error(
         report_codes.SERVICE_STOP_ERROR,
-        msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "reason": reason,
@@ -1308,13 +1137,8 @@ def service_stop_success(service, node=None, instance=None):
     string node node on which service has been requested to stop
     string instance instance of service
     """
-    if instance:
-        msg = "{service}@{instance} stopped"
-    else:
-        msg = "{service} stopped"
     return ReportItem.info(
         report_codes.SERVICE_STOP_SUCCESS,
-        msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "node": node,
@@ -1330,10 +1154,8 @@ def service_kill_error(services, reason):
     """
     return ReportItem.error(
         report_codes.SERVICE_KILL_ERROR,
-        "Unable to kill {services_str}: {reason}",
         info={
             "services": services,
-            "services_str": ", ".join(services),
             "reason": reason,
         }
     )
@@ -1345,10 +1167,8 @@ def service_kill_success(services):
     """
     return ReportItem.info(
         report_codes.SERVICE_KILL_SUCCESS,
-        "{services_str} killed",
         info={
             "services": services,
-            "services_str": ", ".join(services),
         }
     )
 
@@ -1358,13 +1178,8 @@ def service_enable_started(service, instance=None):
     string service service name or description
     string instance instance of service
     """
-    if instance:
-        msg = "Enabling {service}@{instance}..."
-    else:
-        msg = "Enabling {service}..."
     return ReportItem.info(
         report_codes.SERVICE_ENABLE_STARTED,
-        msg,
         info={
             "service": service,
             "instance": instance,
@@ -1379,13 +1194,8 @@ def service_enable_error(service, reason, node=None, instance=None):
     string node node on which service was enabled
     string instance instance of service
     """
-    if instance:
-        msg = "Unable to enable {service}@{instance}: {reason}"
-    else:
-        msg = "Unable to enable {service}: {reason}"
     return ReportItem.error(
         report_codes.SERVICE_ENABLE_ERROR,
-        msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "reason": reason,
@@ -1401,13 +1211,8 @@ def service_enable_success(service, node=None, instance=None):
     string node node on which service has been enabled
     string instance instance of service
     """
-    if instance:
-        msg = "{service}@{instance} enabled"
-    else:
-        msg = "{service} enabled"
     return ReportItem.info(
         report_codes.SERVICE_ENABLE_SUCCESS,
-        msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "node": node,
@@ -1423,13 +1228,8 @@ def service_enable_skipped(service, reason, node=None, instance=None):
     string node node on which service has been requested to enable
     string instance instance of service
     """
-    if instance:
-        msg = "not enabling {service}@{instance} - {reason}"
-    else:
-        msg = "not enabling {service} - {reason}"
     return ReportItem.info(
         report_codes.SERVICE_ENABLE_SKIPPED,
-        msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "reason": reason,
@@ -1444,13 +1244,8 @@ def service_disable_started(service, instance=None):
     string service service name or description
     string instance instance of service
     """
-    if instance:
-        msg = "Disabling {service}@{instance}..."
-    else:
-        msg = "Disabling {service}..."
     return ReportItem.info(
         report_codes.SERVICE_DISABLE_STARTED,
-        msg,
         info={
             "service": service,
             "instance": instance,
@@ -1465,13 +1260,8 @@ def service_disable_error(service, reason, node=None, instance=None):
     string node node on which service was disabled
     string instance instance of service
     """
-    if instance:
-        msg = "Unable to disable {service}@{instance}: {reason}"
-    else:
-        msg = "Unable to disable {service}: {reason}"
     return ReportItem.error(
         report_codes.SERVICE_DISABLE_ERROR,
-        msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "reason": reason,
@@ -1487,13 +1277,8 @@ def service_disable_success(service, node=None, instance=None):
     string node node on which service was disabled
     string instance instance of service
     """
-    if instance:
-        msg = "{service}@{instance} disabled"
-    else:
-        msg = "{service} disabled"
     return ReportItem.info(
         report_codes.SERVICE_DISABLE_SUCCESS,
-        msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "node": node,
@@ -1501,16 +1286,6 @@ def service_disable_success(service, node=None, instance=None):
         }
     )
 
-def invalid_metadata_format(severity=ReportItemSeverity.ERROR, forceable=None):
-    """
-    Invalid format of metadata
-    """
-    return ReportItem(
-        report_codes.INVALID_METADATA_FORMAT,
-        severity,
-        "Invalid metadata format",
-        forceable=forceable
-    )
 
 def unable_to_get_agent_metadata(
     agent, reason, severity=ReportItemSeverity.ERROR, forceable=None
@@ -1518,13 +1293,12 @@ def unable_to_get_agent_metadata(
     """
     There were some issues trying to get metadata of agent
 
-    agent -- agent which metadata were unable to obtain
-    reason -- reason of failure
+    string agent agent which metadata were unable to obtain
+    string reason reason of failure
     """
     return ReportItem(
         report_codes.UNABLE_TO_GET_AGENT_METADATA,
         severity,
-        "Unable to get metadata of '{agent}': {reason}",
         info={
             "agent": agent,
             "reason": reason
@@ -1532,49 +1306,61 @@ def unable_to_get_agent_metadata(
         forceable=forceable
     )
 
-
-def agent_not_found(agent, severity=ReportItemSeverity.ERROR, forceable=None):
+def invalid_resource_agent_name(name):
     """
-    Specified agent doesn't exist
+    The entered resource agent name is not valid.
+    This name has the internal structure. The code needs to work with parts of
+    this structure and fails if parts can not be obtained.
 
-    agent -- name of agent which doesn't exist
+    string name is the entered name
     """
-    return ReportItem(
-        report_codes.AGENT_NOT_FOUND,
-        severity,
-        "Agent '{agent}' not found",
-        info={"agent": agent},
-        forceable=forceable
+    return ReportItem.error(
+        report_codes.INVALID_RESOURCE_AGENT_NAME,
+        info={
+            "name": name,
+        }
     )
 
+def agent_name_guessed(entered_name, guessed_name):
+    """
+    Resource agent name was deduced from the entered name.
+    Pcs supports the use of abbreviated resource agent names (e.g.
+    ocf:heartbeat:Delay => Delay) when it can be clearly deduced.
 
-def agent_not_supported(
-    agent, severity=ReportItemSeverity.ERROR, forceable=None
-):
+    string entered_name is entered name
+    string guessed_name is deduced name
     """
-    Specified agent is not supported
+    return ReportItem.info(
+        report_codes.AGENT_NAME_GUESSED,
+        info={
+            "entered_name": entered_name,
+            "guessed_name": guessed_name,
+        }
+    )
 
-    agent -- name of agent which is not supported
+def agent_name_guess_found_more_than_one(agent, possible_agents):
     """
-    return ReportItem(
-        report_codes.UNSUPPORTED_AGENT,
-        severity,
-        "Agent '{agent}' is not supported",
-        info={"agent": agent},
-        forceable=forceable
+    More than one agent found based on the search string, specify one of them
+    string agent searched name of an agent
+    iterable possible_agents full names of agents matching the search
+    """
+    return ReportItem.error(
+        report_codes.AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE,
+        info={
+            "agent": agent,
+            "possible_agents": possible_agents,
+            "possible_agents_str": ", ".join(sorted(possible_agents)),
+        }
     )
 
 
-def resource_agent_general_error(agent=None):
+def agent_name_guess_found_none(agent):
     """
-    General not specific error of resource or fence agent.
-
-    agent -- agent name
+    Specified agent doesn't exist
+    string agent name of the agent which doesn't exist
     """
-    msg = "Unspecified problem of resource/fence agent"
     return ReportItem.error(
-        report_codes.AGENT_GENERAL_ERROR,
-        msg if agent is None else msg + " '{agent}'",
+        report_codes.AGENT_NAME_GUESS_FOUND_NONE,
         info={"agent": agent}
     )
 
@@ -1587,7 +1373,6 @@ def omitting_node(node):
     """
     return ReportItem.warning(
         report_codes.OMITTING_NODE,
-        "Omitting node '{node}'",
         info={"node": node}
     )
 
@@ -1598,7 +1383,6 @@ def sbd_check_started():
     """
     return ReportItem.info(
         report_codes.SBD_CHECK_STARTED,
-        "Running SBD pre-enabling checks..."
     )
 
 
@@ -1610,7 +1394,6 @@ def sbd_check_success(node):
     """
     return ReportItem.info(
         report_codes.SBD_CHECK_SUCCESS,
-        "{node}: SBD pre-enabling checks done",
         info={"node": node}
     )
 
@@ -1621,7 +1404,6 @@ def sbd_config_distribution_started():
     """
     return ReportItem.info(
         report_codes.SBD_CONFIG_DISTRIBUTION_STARTED,
-        "Distributing SBD config..."
     )
 
 
@@ -1633,7 +1415,6 @@ def sbd_config_accepted_by_node(node):
     """
     return ReportItem.info(
         report_codes.SBD_CONFIG_ACCEPTED_BY_NODE,
-        "{node}: SBD config saved",
         info={"node": node}
     )
 
@@ -1649,7 +1430,6 @@ def unable_to_get_sbd_config(node, reason, severity=ReportItemSeverity.ERROR):
     return ReportItem(
         report_codes.UNABLE_TO_GET_SBD_CONFIG,
         severity,
-        "Unable to get SBD configuration from node '{node}': {reason}",
         info={
             "node": node,
             "reason": reason
@@ -1663,7 +1443,6 @@ def sbd_enabling_started():
     """
     return ReportItem.info(
         report_codes.SBD_ENABLING_STARTED,
-        "Enabling SBD service..."
     )
 
 
@@ -1673,7 +1452,6 @@ def sbd_disabling_started():
     """
     return ReportItem.info(
         report_codes.SBD_DISABLING_STARTED,
-        "Disabling SBD service..."
     )
 
 
@@ -1686,7 +1464,6 @@ def invalid_response_format(node):
     """
     return ReportItem.error(
         report_codes.INVALID_RESPONSE_FORMAT,
-        "{node}: Invalid format of response",
         info={"node": node}
     )
 
@@ -1699,7 +1476,6 @@ def sbd_not_installed(node):
     """
     return ReportItem.error(
         report_codes.SBD_NOT_INSTALLED,
-        "SBD is not installed on node '{node}'",
         info={"node": node}
     )
 
@@ -1713,7 +1489,6 @@ def watchdog_not_found(node, watchdog):
     """
     return ReportItem.error(
         report_codes.WATCHDOG_NOT_FOUND,
-        "Watchdog '{watchdog}' does not exist on node '{node}'",
         info={
             "node": node,
             "watchdog": watchdog
@@ -1729,7 +1504,6 @@ def invalid_watchdog_path(watchdog):
     """
     return ReportItem.error(
         report_codes.WATCHDOG_INVALID,
-        "Watchdog path '{watchdog}' is invalid.",
         info={"watchdog": watchdog}
     )
 
@@ -1744,7 +1518,6 @@ def unable_to_get_sbd_status(node, reason):
     """
     return ReportItem.warning(
         report_codes.UNABLE_TO_GET_SBD_STATUS,
-        "Unable to get status of SBD from node '{node}': {reason}",
         info={
             "node": node,
             "reason": reason
@@ -1757,7 +1530,6 @@ def cluster_restart_required_to_apply_changes():
     """
     return ReportItem.warning(
         report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES,
-        "Cluster restart is required in order to apply these changes."
     )
 
 
@@ -1773,7 +1545,6 @@ def cib_alert_recipient_already_exists(
     return ReportItem(
         report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
         severity,
-        "Recipient '{recipient}' in alert '{alert}' already exists",
         info={
             "recipient": recipient_value,
             "alert": alert_id
@@ -1790,7 +1561,6 @@ def cib_alert_recipient_invalid_value(recipient_value):
     """
     return ReportItem.error(
         report_codes.CIB_ALERT_RECIPIENT_VALUE_INVALID,
-        "Recipient value '{recipient}' is not valid.",
         info={"recipient": recipient_value}
     )
 
@@ -1802,7 +1572,6 @@ def cib_alert_not_found(alert_id):
     """
     return ReportItem.error(
         report_codes.CIB_ALERT_NOT_FOUND,
-        "Alert '{alert}' not found.",
         info={"alert": alert_id}
     )
 
@@ -1813,7 +1582,6 @@ def cib_upgrade_successful():
     """
     return ReportItem.info(
         report_codes.CIB_UPGRADE_SUCCESSFUL,
-        "CIB has been upgraded to the latest schema version."
     )
 
 
@@ -1825,7 +1593,6 @@ def cib_upgrade_failed(reason):
     """
     return ReportItem.error(
         report_codes.CIB_UPGRADE_FAILED,
-        "Upgrading of CIB to the latest schema failed: {reason}",
         info={"reason": reason}
     )
 
@@ -1841,9 +1608,6 @@ def unable_to_upgrade_cib_to_required_version(
     """
     return ReportItem.error(
         report_codes.CIB_UPGRADE_FAILED_TO_MINIMAL_REQUIRED_VERSION,
-        "Unable to upgrade CIB to required schema version {required_version} "
-        "or higher. Current version is {current_version}. Newer version of "
-        "pacemaker is needed.",
         info={
             "required_version": "{0}.{1}.{2}".format(*required_version),
             "current_version": "{0}.{1}.{2}".format(*current_version)
@@ -1854,15 +1618,9 @@ def file_already_exists(
         file_role, file_path, severity=ReportItemSeverity.ERROR,
         forceable=None, node=None
     ):
-    msg = "file {file_path} already exists"
-    if file_role:
-        msg = "{file_role} " + msg
-    if node:
-        msg = "{node}: " + msg
     return ReportItem(
         report_codes.FILE_ALREADY_EXISTS,
         severity,
-        msg,
         info={
             "file_role": file_role,
             "file_path": file_path,
@@ -1874,7 +1632,6 @@ def file_already_exists(
 def file_does_not_exist(file_role, file_path=""):
     return ReportItem.error(
         report_codes.FILE_DOES_NOT_EXIST,
-        "{file_role} file {file_path} does not exist",
         info={
             "file_role": file_role,
             "file_path": file_path,
@@ -1885,14 +1642,9 @@ def file_io_error(
     file_role, file_path="", reason="", operation="work with",
     severity=ReportItemSeverity.ERROR
 ):
-    if file_path:
-        msg = "unable to {operation} {file_role} '{file_path}': {reason}"
-    else:
-        msg = "unable to {operation} {file_role}: {reason}"
     return ReportItem(
         report_codes.FILE_IO_ERROR,
         severity,
-        msg,
         info={
             "file_role": file_role,
             "file_path": file_path,
@@ -1904,7 +1656,6 @@ def file_io_error(
 def unable_to_determine_user_uid(user):
     return ReportItem.error(
         report_codes.UNABLE_TO_DETERMINE_USER_UID,
-        "Unable to determine uid of user '{user}'",
         info={
             "user": user
         }
@@ -1913,7 +1664,6 @@ def unable_to_determine_user_uid(user):
 def unable_to_determine_group_gid(group):
     return ReportItem.error(
         report_codes.UNABLE_TO_DETERMINE_GROUP_GID,
-        "Unable to determine gid of group '{group}'",
         info={
             "group": group
         }
@@ -1922,16 +1672,13 @@ def unable_to_determine_group_gid(group):
 def unsupported_operation_on_non_systemd_systems():
     return ReportItem.error(
         report_codes.UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS,
-        "unsupported operation on non systemd systems"
     )
 
 def live_environment_required(forbidden_options):
     return ReportItem.error(
         report_codes.LIVE_ENVIRONMENT_REQUIRED,
-        "This command does not support {options_string}",
         info={
             "forbidden_options": forbidden_options,
-            "options_string": ", ".join(forbidden_options),
         }
     )
 
@@ -1945,7 +1692,6 @@ def quorum_cannot_disable_atb_due_to_sbd(
     return ReportItem(
         report_codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD,
         severity,
-        "unable to disable auto_tie_breaker: SBD fencing will have no effect",
         forceable=forceable
     )
 
@@ -1956,7 +1702,67 @@ def sbd_requires_atb():
     """
     return ReportItem.warning(
         report_codes.SBD_REQUIRES_ATB,
-        "auto_tie_breaker quorum option will be enabled to make SBD fencing "
-        "effective. Cluster has to be offline to be able to make this change."
     )
 
+
+def acl_role_is_already_assigned_to_target(role_id, target_id):
+    """
+    Error that ACL target or group has already assigned role.
+    """
+    return ReportItem.error(
+        report_codes.CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET,
+        info={
+            "role_id": role_id,
+            "target_id": target_id,
+        }
+    )
+
+
+def acl_role_is_not_assigned_to_target(role_id, target_id):
+    """
+    Error that acl role is not assigned to target or group
+    """
+    return ReportItem.error(
+        report_codes.CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET,
+        info={
+            "role_id": role_id,
+            "target_id": target_id,
+        }
+    )
+
+
+def acl_target_already_exists(target_id):
+    """
+    Error that target with specified id already exists in configuration.
+    """
+    return ReportItem.error(
+        report_codes.CIB_ACL_TARGET_ALREADY_EXISTS,
+        info={
+            "target_id": target_id,
+        }
+    )
+
+
+def cluster_conf_invalid_format(reason):
+    """
+    cluster.conf parsing error
+    """
+    return ReportItem.error(
+        report_codes.CLUSTER_CONF_LOAD_ERROR_INVALID_FORMAT,
+        info={
+            "reason": reason,
+        }
+    )
+
+
+def cluster_conf_read_error(path, reason):
+    """
+    Unable to read cluster.conf
+    """
+    return ReportItem.error(
+        report_codes.CLUSTER_CONF_READ_ERROR,
+        info={
+            "path": path,
+            "reason": reason,
+        }
+    )
diff --git a/pcs/lib/resource_agent.py b/pcs/lib/resource_agent.py
index d49b5c0..150f2b4 100644
--- a/pcs/lib/resource_agent.py
+++ b/pcs/lib/resource_agent.py
@@ -6,429 +6,632 @@ from __future__ import (
 )
 
 import os
+import re
 from lxml import etree
 
 from pcs import settings
 from pcs.lib import reports
-from pcs.lib.errors import ReportItemSeverity
+from pcs.lib.errors import LibraryError, ReportItemSeverity
 from pcs.lib.pacemaker_values import is_true
-from pcs.lib.external import is_path_runnable
 from pcs.common import report_codes
-from pcs.common.tools import simple_cache
 
 
-class ResourceAgentLibError(Exception):
-    pass
+_crm_resource = os.path.join(settings.pacemaker_binaries, "crm_resource")
 
 
-class ResourceAgentCommonError(ResourceAgentLibError):
+class ResourceAgentError(Exception):
     # pylint: disable=super-init-not-called
-    def __init__(self, agent):
-        self.agent = agent
-
-
-class UnsupportedResourceAgent(ResourceAgentCommonError):
-    pass
-
-
-class AgentNotFound(ResourceAgentCommonError):
-    pass
-
-
-class UnableToGetAgentMetadata(ResourceAgentCommonError):
-    # pylint: disable=super-init-not-called
-    def __init__(self, agent, message):
+    def __init__(self, agent, message=""):
         self.agent = agent
         self.message = message
 
 
-class InvalidMetadataFormat(ResourceAgentLibError):
+class UnableToGetAgentMetadata(ResourceAgentError):
     pass
 
-
-def __is_path_abs(path):
-    return path == os.path.abspath(path)
+class InvalidResourceAgentName(ResourceAgentError):
+    pass
 
 
-def __get_text_from_dom_element(element):
-    if element is None or element.text is None:
-        return ""
-    else:
-        return element.text.strip()
+def list_resource_agents_standards(runner):
+    """
+    Return list of resource agents standards (ocf, lsb, ... ) on the local host
+    CommandRunner runner
+    """
+    # retval is number of standards found
+    stdout, dummy_stderr, dummy_retval = runner.run([
+        _crm_resource, "--list-standards"
+    ])
+    ignored_standards = frozenset([
+        # we are only interested in RESOURCE agents
+        "stonith",
+    ])
+    return _prepare_agent_list(stdout, ignored_standards)
+
+
+def list_resource_agents_ocf_providers(runner):
+    """
+    Return list of resource agents ocf providers on the local host
+    CommandRunner runner
+    """
+    # retval is number of providers found
+    stdout, dummy_stderr, dummy_retval = runner.run([
+        _crm_resource, "--list-ocf-providers"
+    ])
+    return _prepare_agent_list(stdout)
 
 
-def _get_parameter(parameter_dom):
+def list_resource_agents_standards_and_providers(runner):
     """
-    Returns dictionary that describes parameter.
-    dictionary format:
-    {
-        name: name of parameter
-        longdesc: long description,
-        shortdesc: short description,
-        type: data type od parameter,
-        default: default value,
-        required: True if is required parameter, False otherwise
-    }
-    Raises InvalidMetadataFormat if parameter_dom is not in valid format
-
-    parameter_dom -- parameter dom element
+    Return list of all standard[:provider] on the local host
+    CommandRunner runner
     """
-    if parameter_dom.tag != "parameter" or parameter_dom.get("name") is None:
-        raise InvalidMetadataFormat()
-
-    longdesc = __get_text_from_dom_element(parameter_dom.find("longdesc"))
-    shortdesc = __get_text_from_dom_element(parameter_dom.find("shortdesc"))
+    standards = (
+        list_resource_agents_standards(runner)
+        +
+        [
+            "ocf:{0}".format(provider)
+            for provider in list_resource_agents_ocf_providers(runner)
+        ]
+    )
+    # do not list ocf resources twice
+    try:
+        standards.remove("ocf")
+    except ValueError:
+        pass
+    return sorted(
+        standards,
+        # works with both str and unicode in both python 2 and 3
+        key=lambda x: x.lower()
+    )
 
-    content = parameter_dom.find("content")
-    if content is None:
-        val_type = "string"
-    else:
-        val_type = content.get("type", "string")
 
-    return {
-        "name": parameter_dom.get("name"),
-        "longdesc": longdesc,
-        "shortdesc": shortdesc,
-        "type": val_type,
-        "default": None if content is None else content.get("default"),
-        "required": is_true(parameter_dom.get("required", "0"))
-    }
+def list_resource_agents(runner, standard_provider):
+    """
+    Return list of resource agents for specified standard on the local host
+    CommandRunner runner
+    string standard_provider standard[:provider], e.g. lsb, ocf, ocf:pacemaker
+    """
+    # retval is 0 on success, anything else when no agents found
+    stdout, dummy_stderr, retval = runner.run([
+        _crm_resource, "--list-agents", standard_provider
+    ])
+    if retval != 0:
+        return []
+    return _prepare_agent_list(stdout)
 
 
-def _get_agent_parameters(metadata_dom):
+def list_stonith_agents(runner):
     """
-    Returns list of parameters from agents metadata.
-    Raises InvalidMetadataFormat if metadata_dom is not in valid format.
-
-    metadata_dom -- agent's metadata dom
+    Return list of fence agents on the local host
+    CommandRunner runner
     """
-    if metadata_dom.tag != "resource-agent":
-        raise InvalidMetadataFormat()
-
-    params_el = metadata_dom.find("parameters")
-    if params_el is None:
+    # retval is 0 on success, anything else when no agents found
+    stdout, dummy_stderr, retval = runner.run([
+        _crm_resource, "--list-agents", "stonith"
+    ])
+    if retval != 0:
         return []
+    ignored_agents = frozenset([
+        "fence_ack_manual",
+        "fence_check",
+        "fence_kdump_send",
+        "fence_legacy",
+        "fence_na",
+        "fence_node",
+        "fence_nss_wrapper",
+        "fence_pcmk",
+        "fence_sanlockd",
+        "fence_tool",
+        "fence_virtd",
+        "fence_vmware_helper",
+    ])
+    return _prepare_agent_list(stdout, ignored_agents)
+
+
+def _prepare_agent_list(agents_string, filter_list=None):
+    ignored = frozenset(filter_list) if filter_list else frozenset([])
+    result = [
+        name
+        for name in [line.strip() for line in agents_string.splitlines()]
+        if name and name not in ignored
+    ]
+    return sorted(
+        result,
+        # works with both str and unicode in both python 2 and 3
+        key=lambda x: x.lower()
+    )
+
 
+def guess_resource_agent_full_name(runner, search_agent_name):
+    """
+    List resource agents matching specified search term
+    string search_agent_name part of full agent name
+    """
+    search_lower = search_agent_name.lower()
+    # list all possible names
+    possible_names = []
+    for std in list_resource_agents_standards_and_providers(runner):
+        for agent in list_resource_agents(runner, std):
+            if search_lower == agent.lower():
+                possible_names.append("{0}:{1}".format(std, agent))
+    # construct agent wrappers
+    agent_candidates = [
+        ResourceAgent(runner, agent) for agent in possible_names
+    ]
+    # check if the agent is valid
     return [
-        _get_parameter(parameter) for parameter in params_el.iter("parameter")
+        agent for agent in agent_candidates if agent.is_valid_metadata()
     ]
 
 
-def _get_pcmk_advanced_stonith_parameters(runner):
+def guess_exactly_one_resource_agent_full_name(runner, search_agent_name):
     """
-    Returns advanced instance attributes for stonith devices
-    Raises UnableToGetAgentMetadata if there is problem with obtaining
-        metadata of stonithd.
-    Raises InvalidMetadataFormat if obtained metadata are not in valid format.
-
-    runner -- CommandRunner
+    Get one resource agent matching specified search term
+    string search_agent_name last part of full agent name
+    Raise LibraryError if zero or more than one agents found
     """
-    @simple_cache
-    def __get_stonithd_parameters():
-        stdout, stderr, dummy_retval = runner.run(
-            [settings.stonithd_binary, "metadata"]
+    agents = guess_resource_agent_full_name(runner, search_agent_name)
+    if not agents:
+        raise LibraryError(
+            reports.agent_name_guess_found_none(search_agent_name)
         )
-        if stdout.strip() == "":
-            raise UnableToGetAgentMetadata("stonithd", stderr)
-
-        try:
-            params = _get_agent_parameters(etree.fromstring(stdout))
-            for param in params:
-                param["longdesc"] = "{0}\n{1}".format(
-                    param["shortdesc"], param["longdesc"]
-                ).strip()
-                is_advanced = param["shortdesc"].startswith("Advanced use only")
-                param["advanced"] = is_advanced
-            return params
-        except etree.XMLSyntaxError:
-            raise InvalidMetadataFormat()
-
-    return __get_stonithd_parameters()
+    if len(agents) > 1:
+        raise LibraryError(
+            reports.agent_name_guess_found_more_than_one(
+                search_agent_name,
+                [agent.get_name() for agent in agents]
+            )
+        )
+    return agents[0]
 
+def find_valid_resource_agent_by_name(
+    report_processor, runner, name, allowed_absent=False
+):
+    if ":" not in name:
+        agent = guess_exactly_one_resource_agent_full_name(runner, name)
+        report_processor.process(
+            reports.agent_name_guessed(name, agent.get_name())
+        )
+        return agent
 
-def get_fence_agent_metadata(runner, fence_agent):
+    try:
+        return ResourceAgent(runner, name).validate_metadata()
+    except InvalidResourceAgentName as e:
+        raise LibraryError(resource_agent_error_to_report_item(e))
+    except UnableToGetAgentMetadata as e:
+        if not allowed_absent:
+            raise LibraryError(resource_agent_error_to_report_item(e))
+
+        report_processor.process(resource_agent_error_to_report_item(
+            e,
+            severity=ReportItemSeverity.WARNING,
+            forceable=True
+        ))
+
+        return AbsentResourceAgent(runner, name)
+
+class Agent(object):
     """
-    Returns dom of metadata for specified fence agent
-    Raises AgentNotFound if fence_agent doesn't starts with fence_ or it is
-        relative path or file is not runnable.
-    Raises UnableToGetAgentMetadata if there was problem getting or
-        parsing metadata.
-
-    runner -- CommandRunner
-    fence_agent -- fence agent name, should start with 'fence_'
    Base class for providing convenient access to an agent's metadata
     """
-    script_path = os.path.join(settings.fence_agent_binaries, fence_agent)
+    def __init__(self, runner):
+        """
+        create an instance which reads metadata by itself on demand
+        CommandRunner runner
+        """
+        self._runner = runner
+        self._metadata = None
+
+
+    def get_name(self):
+        raise NotImplementedError()
+
+
+    def get_name_info(self):
+        """
+        Get structured agent's info, only name is populated
+        """
+        return {
+            "name": self.get_name(),
+            "shortdesc":"",
+            "longdesc": "",
+            "parameters": [],
+            "actions": [],
+        }
+
+
+    def get_description_info(self):
+        """
+        Get structured agent's info, only name and description is populated
+        """
+        agent_info = self.get_name_info()
+        agent_info["shortdesc"] = self.get_shortdesc()
+        agent_info["longdesc"] = self.get_longdesc()
+        return agent_info
+
+
+    def get_full_info(self):
+        """
+        Get structured agent's info, all items are populated
+        """
+        agent_info = self.get_description_info()
+        agent_info["parameters"] = self.get_parameters()
+        agent_info["actions"] = self.get_actions()
+        return agent_info
+
+
+    def get_shortdesc(self):
+        """
+        Get a short description of agent's purpose
+        """
+        return (
+            self._get_text_from_dom_element(
+                self._get_metadata().find("shortdesc")
+            )
+            or
+            self._get_metadata().get("shortdesc", "")
+        )
 
-    if not (
-        fence_agent.startswith("fence_") and
-        __is_path_abs(script_path) and
-        is_path_runnable(script_path)
-    ):
-        raise AgentNotFound(fence_agent)
 
-    stdout, stderr, dummy_retval = runner.run(
-        [script_path, "-o", "metadata"]
-    )
+    def get_longdesc(self):
+        """
+        Get a long description of agent's purpose
+        """
+        return self._get_text_from_dom_element(
+            self._get_metadata().find("longdesc")
+        )
 
-    if stdout.strip() == "":
-        raise UnableToGetAgentMetadata(fence_agent, stderr)
 
-    try:
-        return etree.fromstring(stdout)
-    except etree.XMLSyntaxError as e:
-        raise UnableToGetAgentMetadata(fence_agent, str(e))
+    def get_parameters(self):
+        """
+        Get list of agent's parameters, each parameter is described by dict:
+        {
+            name: name of parameter
+            longdesc: long description,
+            shortdesc: short description,
+            type: data type of parameter,
+            default: default value,
+            required: True if is required parameter, False otherwise
+        }
+        """
+        params_element = self._get_metadata().find("parameters")
+        if params_element is None:
+            return []
+        return [
+            self._get_parameter(parameter)
+            for parameter in params_element.iter("parameter")
+        ]
+
+
+    def _get_parameter(self, parameter_element):
+        value_type = "string"
+        default_value = None
+        content_element = parameter_element.find("content")
+        if content_element is not None:
+            value_type = content_element.get("type", value_type)
+            default_value = content_element.get("default", default_value)
+
+        return {
+            "name": parameter_element.get("name", ""),
+            "longdesc": self._get_text_from_dom_element(
+                parameter_element.find("longdesc")
+            ),
+            "shortdesc": self._get_text_from_dom_element(
+                parameter_element.find("shortdesc")
+            ),
+            "type": value_type,
+            "default": default_value,
+            "required": is_true(parameter_element.get("required", "0")),
+            "advanced": False,
+        }
+
+
+    def validate_parameters_values(self, parameters_values):
+        """
+        Return tuple of lists (<invalid attributes>, <missing required attributes>)
+        dict parameters_values key is attribute name and value is attribute value
+        """
+        # TODO Add value and type checking (e.g. if parameter["type"] is
+        # integer, its value cannot be "abc"). This most probably will require
+        # redefining the format of the return value and rewriting the whole
+        # function, which will only be good. For now we just stick to the
+        # original legacy code.
+        agent_params = self.get_parameters()
+
+        required_missing = []
+        for attr in agent_params:
+            if attr["required"] and attr["name"] not in parameters_values:
+                required_missing.append(attr["name"])
+
+        valid_attrs = [attr["name"] for attr in agent_params]
+        return (
+            [attr for attr in parameters_values if attr not in valid_attrs],
+            required_missing
+        )
 
 
-def _get_nagios_resource_agent_metadata(agent):
-    """
-    Returns metadata dom for specified nagios resource agent.
-    Raises AgentNotFound if agent is relative path.
-    Raises UnableToGetAgentMetadata if there was problem getting or
-        parsing metadata.
+    def get_actions(self):
+        """
+        Get list of agent's actions (operations)
+        """
+        actions_element = self._get_metadata().find("actions")
+        if actions_element is None:
+            return []
+        # TODO Resulting dict should contain all keys defined for an action.
+        # But we do not know what are those, because the metadata xml schema is
+        # outdated and doesn't describe current agents' metadata xml.
+        return [
+            dict(action.items())
+            for action in actions_element.iter("action")
+        ]
 
-    agent -- name of nagios resource agent
-    """
-    agent_name = "nagios:" + agent
-    metadata_path = os.path.join(settings.nagios_metadata_path, agent + ".xml")
 
-    if not __is_path_abs(metadata_path):
-        raise AgentNotFound(agent_name)
+    def _get_metadata(self):
+        """
+        Return metadata DOM
+        Raise UnableToGetAgentMetadata if agent doesn't exist or unable to get
+            or parse its metadata
+        """
+        if self._metadata is None:
+            self._metadata = self._parse_metadata(self._load_metadata())
+        return self._metadata
 
-    try:
-        return etree.parse(metadata_path).getroot()
-    except Exception as e:
-        raise UnableToGetAgentMetadata(agent_name, str(e))
 
+    def _load_metadata(self):
+        raise NotImplementedError()
 
-def _get_ocf_resource_agent_metadata(runner, provider, agent):
-    """
-    Returns metadata dom for specified ocf resource agent
-    Raises AgentNotFound if specified agent is relative path or file is not
-        runnable.
-    Raises UnableToGetAgentMetadata if there was problem getting or
-    parsing metadata.
-
-    runner -- CommandRunner
-    provider -- resource agent provider
-    agent -- resource agent name
-    """
-    agent_name = "ocf:" + provider + ":" + agent
 
-    script_path = os.path.join(settings.ocf_resources, provider, agent)
+    def _parse_metadata(self, metadata):
+        try:
+            dom = etree.fromstring(metadata)
+            # TODO Majority of agents don't provide valid metadata, so we skip
+            # the validation for now. We want to enable it once the schema
+            # and/or agents are fixed.
+            # When enabling this check for overrides in child classes.
+            #if os.path.isfile(settings.agent_metadata_schema):
+            #    etree.DTD(file=settings.agent_metadata_schema).assertValid(dom)
+            return dom
+        except (etree.XMLSyntaxError, etree.DocumentInvalid) as e:
+            raise UnableToGetAgentMetadata(self.get_name(), str(e))
+
+
+    def _get_text_from_dom_element(self, element):
+        if element is None or element.text is None:
+            return ""
+        return element.text.strip()
 
-    if not __is_path_abs(script_path) or not is_path_runnable(script_path):
-        raise AgentNotFound(agent_name)
 
-    stdout, stderr, dummy_retval = runner.run(
-        [script_path, "meta-data"],
-        env_extend={"OCF_ROOT": settings.ocf_root}
-    )
+class FakeAgentMetadata(Agent):
+    def get_name(self):
+        raise NotImplementedError()
 
-    if stdout.strip() == "":
-        raise UnableToGetAgentMetadata(agent_name, stderr)
 
-    try:
-        return etree.fromstring(stdout)
-    except etree.XMLSyntaxError as e:
-        raise UnableToGetAgentMetadata(agent_name, str(e))
+    def _load_metadata(self):
+        raise NotImplementedError()
 
 
-def get_agent_desc(metadata_dom):
-    """
-    Returns dictionary which contains description of agent from it's metadata.
-    dictionary format:
-    {
-        longdesc: long description
-        shortdesc: short description
-    }
-    Raises InvalidMetadataFormat if metadata_dom is not in valid format.
-
-    metadata_dom -- metadata dom of agent
-    """
-    if metadata_dom.tag != "resource-agent":
-        raise InvalidMetadataFormat()
+class StonithdMetadata(FakeAgentMetadata):
+    def get_name(self):
+        return "stonithd"
 
-    shortdesc_el = metadata_dom.find("shortdesc")
-    if shortdesc_el is None:
-        shortdesc = metadata_dom.get("shortdesc", "")
-    else:
-        shortdesc = shortdesc_el.text
 
-    return {
-        "longdesc": __get_text_from_dom_element(metadata_dom.find("longdesc")),
-        "shortdesc": "" if shortdesc is None else shortdesc.strip()
-    }
+    def _get_parameter(self, parameter_element):
+        parameter = super(StonithdMetadata, self)._get_parameter(
+            parameter_element
+        )
+        # Metadata are written in such a way that a longdesc text is a
+        # continuation of a shortdesc text.
+        parameter["longdesc"] = "{0}\n{1}".format(
+            parameter["shortdesc"],
+            parameter["longdesc"]
+        ).strip()
+        parameter["advanced"] = parameter["shortdesc"].startswith(
+            "Advanced use only"
+        )
+        return parameter
 
 
-def _filter_fence_agent_parameters(parameters):
-    """
-    Returns filtered list of fence agent parameters. It removes parameters
-    that user should not be setting.
+    def _load_metadata(self):
+        stdout, stderr, dummy_retval = self._runner.run(
+            [settings.stonithd_binary, "metadata"]
+        )
+        metadata = stdout.strip()
+        if not metadata:
+            raise UnableToGetAgentMetadata(self.get_name(), stderr.strip())
+        return metadata
 
-    parameters -- list of fence agent parameters
-    """
-    # we don't allow user to change these options, they are intended
-    # to be used interactively (command line), there is no point setting them
-    banned_parameters = ["debug", "verbose", "version", "help"]
-    # but still, we have to let user change 'action' because of backward
-    # compatibility, just marking it as not required
-    for param in parameters:
-        if param["name"] == "action":
-            param["shortdesc"] = param.get("shortdesc", "") + "\nWARNING: " +\
-                "specifying 'action' is deprecated and not necessary with " +\
-                "current Pacemaker versions"
-            param["required"] = False
-    return [
-        param for param in parameters if param["name"] not in banned_parameters
-    ]
 
+class CrmAgent(Agent):
+    def __init__(self, runner, full_agent_name):
+        """
+        init
+        CommandRunner runner
+        string full_agent_name standard:provider:type or standard:type
+        """
+        super(CrmAgent, self).__init__(runner)
+        self._full_agent_name = full_agent_name
 
-def get_fence_agent_parameters(runner, metadata_dom):
-    """
-    Returns complete list of parameters for fence agent from it's metadata.
 
-    runner -- CommandRunner
-    metadata_dom -- metadata dom of fence agent
-    """
-    return (
-        _filter_fence_agent_parameters(_get_agent_parameters(metadata_dom)) +
-        _get_pcmk_advanced_stonith_parameters(runner)
-    )
+    def get_name(self):
+        return self._full_agent_name
 
 
-def get_resource_agent_parameters(metadata_dom):
-    """
-    Returns complete list of parameters for resource agent from it's
-    metadata.
+    def is_valid_metadata(self):
+        """
+        If we are able to get metadata, we consider the agent existing and valid
+        """
+        # if the agent is valid, we do not need to load its metadata again
+        try:
+            self._get_metadata()
+        except UnableToGetAgentMetadata:
+            return False
+        return True
+
+    def validate_metadata(self):
+        """
+        Validate metadata by attempting to retrieve it.
+        """
+        self._get_metadata()
+        return self
+
+    def _load_metadata(self):
+        env_path = ":".join([
+            # otherwise pacemaker cannot run RHEL fence agents to get their
+            # metadata
+            settings.fence_agent_binaries,
+            # otherwise heartbeat and cluster-glue agents don't work
+            "/bin/",
+            # otherwise heartbeat and cluster-glue agents don't work
+            "/usr/bin/",
+        ])
+        stdout, stderr, retval = self._runner.run(
+            [_crm_resource, "--show-metadata", self._full_agent_name],
+            env_extend={
+                "PATH": env_path,
+            }
+        )
+        if retval != 0:
+            raise UnableToGetAgentMetadata(self.get_name(), stderr.strip())
+        return stdout.strip()
+
 
-    metadata_dom -- metadata dom of resource agent
+class ResourceAgent(CrmAgent):
     """
-    return _get_agent_parameters(metadata_dom)
+    Provides convenient access to a resource agent's metadata
+    """
+    def __init__(self, runner, full_agent_name):
+        if not re.match("^[^:]+(:[^:]+){1,2}$", full_agent_name):
+            raise InvalidResourceAgentName(full_agent_name)
+        super(ResourceAgent, self).__init__(runner, full_agent_name)
 
+class AbsentResourceAgent(ResourceAgent):
+    def _load_metadata(self):
+        return "<resource-agent/>"
 
-def get_resource_agent_metadata(runner, agent):
-    """
-    Returns metadata of specified agent as dom
-    Raises UnsupportedResourceAgent if specified agent is not ocf or nagios
-        agent.
+    def validate_parameters_values(self, parameters_values):
+        return ([], [])
 
-    runner -- CommandRunner
-    agent -- agent name
-    """
-    error = UnsupportedResourceAgent(agent)
-    if agent.startswith("ocf:"):
-        agent_info = agent.split(":", 2)
-        if len(agent_info) != 3:
-            raise error
-        return _get_ocf_resource_agent_metadata(runner, *agent_info[1:])
-    elif agent.startswith("nagios:"):
-        return _get_nagios_resource_agent_metadata(agent.split("nagios:", 1)[1])
-    else:
-        raise error
-
-
-def _get_action(action_el):
+class StonithAgent(CrmAgent):
     """
-    Returns XML action element as dictionary, where all elements attributes
-    are key of dict
-    Raises InvalidMetadataFormat if action_el is not in valid format.
-
-    action_el -- action lxml.etree element
+    Provides convenient access to a stonith agent's metadata
     """
-    if action_el.tag != "action" or action_el.get("name") is None:
-        raise InvalidMetadataFormat()
-
-    return dict(action_el.items())
 
+    _stonithd_metadata = None
 
-def get_agent_actions(metadata_dom):
-    """
-    Returns list of actions from agents metadata
-    Raises InvalidMetadataFormat if metadata_dom is not in valid format.
 
-    metadata_dom -- agent's metadata dom
-    """
-    if metadata_dom.tag != "resource-agent":
-        raise InvalidMetadataFormat()
+    def __init__(self, runner, agent_name):
+        super(StonithAgent, self).__init__(
+            runner,
+            "stonith:{0}".format(agent_name)
+        )
+        self._agent_name = agent_name
 
-    actions_el = metadata_dom.find("actions")
-    if actions_el is None:
-        return []
 
-    return [
-        _get_action(action) for action in actions_el.iter("action")
-    ]
+    def get_name(self):
+        return self._agent_name
 
 
-def _validate_instance_attributes(agent_params, attrs):
-    valid_attrs = [attr["name"] for attr in agent_params]
-    required_missing = []
+    def get_parameters(self):
+        return (
+            self._filter_parameters(
+                super(StonithAgent, self).get_parameters()
+            )
+            +
+            self._get_stonithd_metadata().get_parameters()
+        )
 
-    for attr in agent_params:
-        if attr["required"] and attr["name"] not in attrs:
-            required_missing.append(attr["name"])
 
-    return [attr for attr in attrs if attr not in valid_attrs], required_missing
+    def _filter_parameters(self, parameters):
+        """
+        Remove parameters that should not be available to the user.
+        """
+        # We don't allow the user to change these options which are only
+        # intended to be used interactively on command line.
+        remove_parameters = frozenset([
+            "debug",
+            "help",
+            "verbose",
+            "version",
+        ])
+        filtered = []
+        for param in parameters:
+            if param["name"] in remove_parameters:
+                continue
+            elif param["name"] == "action":
+                # However we still need the user to be able to set 'action' due
+                # to backward compatibility reasons. So we just mark it as not
+                # required.
+                new_param = dict(param)
+                new_param["shortdesc"] = "\n".join(filter(None, [
+                    param.get("shortdesc", ""),
+                    "WARNING: specifying 'action' is deprecated and not "
+                        "necessary with current Pacemaker versions."
+                    ,
+                ]))
+                new_param["required"] = False
+                filtered.append(new_param)
+            else:
+                filtered.append(param)
+            # 'port' parameter is required by a fence agent, but it is filled
+            # automatically by pacemaker based on 'pcmk_host_map' or
+            # 'pcmk_host_list' parameter (defined in stonithd metadata).
+            # Pacemaker marks the 'port' parameter as not required for us.
+        return filtered
+
+
+    def _get_stonithd_metadata(self):
+        if not self.__class__._stonithd_metadata:
+            self.__class__._stonithd_metadata = StonithdMetadata(self._runner)
+        return self.__class__._stonithd_metadata
+
+
+    def get_actions(self):
+        # In previous versions of pcs there was no way to read actions from
+        # stonith agents, the functions always returned an empty list. It
+        # wasn't clear if that is a mistake or an intention. We keep it that
+        # way for two reasons:
+        # 1) Fence agents themselves specify the actions without any attributes
+        # (interval, timeout)
+        # 2) Pacemaker explained shows an example stonith agent configuration
+        # in CIB with only monitor operation specified (and that pcs creates
+        # automatically in "pcs stonith create" regardless of provided actions
+        # from here).
+        # It may be better to return real actions from this class and handle
+        # them in higher layers, which can decide if the actions are desired or
+        # not. For now there is not enough information to do that. Code which
+        # uses this is not clean enough. Once everything is cleaned we should
+        # decide if it is better to move this to higher level.
+        return []
 
 
-def validate_instance_attributes(runner, instance_attrs, agent):
-    """
-    Validates instance attributes according to specified agent.
-    Returns tuple of lists (<invalid attributes>, <missing required attributes>)
-
-    runner -- CommandRunner
-    instance_attrs -- dictionary of instance attributes, where key is
-        attribute name and value is attribute value
-    agent -- full name (<class>:<agent> or <class>:<provider>:<agent>)
-        of resource/fence agent
-    """
-    if agent.startswith("stonith:"):
-        agent_params = get_fence_agent_parameters(
-            runner,
-            get_fence_agent_metadata(runner, agent.split("stonith:", 1)[1])
-        )
-        bad_attrs, missing_required = _validate_instance_attributes(
-            agent_params, instance_attrs
-        )
-        if "port" in missing_required:
-            # Temporarily make "port" an optional parameter. Once we are
-            # getting metadata from pacemaker, this will be reviewed and fixed.
-            missing_required.remove("port")
-        return bad_attrs, missing_required
-    else:
-        agent_params = get_resource_agent_parameters(
-            get_resource_agent_metadata(runner, agent)
-        )
-        return _validate_instance_attributes(agent_params, instance_attrs)
+    def get_provides_unfencing(self):
+        # self.get_actions returns an empty list
+        for action in super(StonithAgent, self).get_actions():
+            if (
+                action.get("name", "") == "on"
+                and
+                action.get("on_target", "0") == "1"
+                and
+                action.get("automatic", "0") == "1"
+            ):
+                return True
+        return False
 
 
-def resource_agent_lib_error_to_report_item(
+def resource_agent_error_to_report_item(
     e, severity=ReportItemSeverity.ERROR, forceable=False
 ):
     """
-    Transform ResourceAgentLibError to ReportItem
+    Transform ResourceAgentError to ReportItem
     """
     force = None
-    if e.__class__ == AgentNotFound:
-        if severity == ReportItemSeverity.ERROR and forceable:
-            force = report_codes.FORCE_UNKNOWN_AGENT
-        return reports.agent_not_found(e.agent, severity, force)
-    if e.__class__ == UnsupportedResourceAgent:
-        if severity == ReportItemSeverity.ERROR and forceable:
-            force = report_codes.FORCE_UNSUPPORTED_AGENT
-        return reports.agent_not_supported(e.agent, severity, force)
     if e.__class__ == UnableToGetAgentMetadata:
         if severity == ReportItemSeverity.ERROR and forceable:
             force = report_codes.FORCE_METADATA_ISSUE
         return reports.unable_to_get_agent_metadata(
             e.agent, e.message, severity, force
         )
-    if e.__class__ == InvalidMetadataFormat:
-        if severity == ReportItemSeverity.ERROR and forceable:
-            force = report_codes.FORCE_METADATA_ISSUE
-        return reports.invalid_metadata_format(severity, force)
-    if e.__class__ == ResourceAgentCommonError:
-        return reports.resource_agent_general_error(e.agent)
-    if e.__class__ == ResourceAgentLibError:
-        return reports.resource_agent_general_error()
+    if e.__class__ == InvalidResourceAgentName:
+        return reports.invalid_resource_agent_name(e.agent)
     raise e
diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py
index 9b57400..f6b305d 100644
--- a/pcs/lib/sbd.py
+++ b/pcs/lib/sbd.py
@@ -444,6 +444,10 @@ def get_sbd_config(communicator, node):
     return communicator.call_node(node, "remote/get_sbd_config", None)
 
 
+def get_sbd_service_name():
+    return "sbd" if external.is_systemctl() else "sbd_helper"
+
+
 def is_sbd_enabled(runner):
     """
     Check if SBD service is enabled in local system.
@@ -451,7 +455,8 @@ def is_sbd_enabled(runner):
 
     runner -- CommandRunner
     """
-    return external.is_service_enabled(runner, "sbd")
+    return external.is_service_enabled(runner, get_sbd_service_name())
+
 
 
 def is_sbd_installed(runner):
@@ -461,5 +466,5 @@ def is_sbd_installed(runner):
 
     runner -- CommandRunner
     """
-    return external.is_service_installed(runner, "sbd")
+    return external.is_service_installed(runner, get_sbd_service_name())
 
diff --git a/pcs/lib/test/test_cluster_conf_facade.py b/pcs/lib/test/test_cluster_conf_facade.py
new file mode 100644
index 0000000..44c6c58
--- /dev/null
+++ b/pcs/lib/test/test_cluster_conf_facade.py
@@ -0,0 +1,151 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_xml_equal,
+)
+from pcs.test.tools.misc import outdent
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severity
+
+from pcs.lib import cluster_conf_facade as lib
+
+class FromStringTest(TestCase):
+    def test_success(self):
+        facade = lib.ClusterConfFacade.from_string("<cluster/>")
+        self.assertTrue(isinstance(facade, lib.ClusterConfFacade))
+        assert_xml_equal("<cluster/>", etree.tostring(facade._config).decode())
+
+    def test_syntax_error(self):
+        assert_raise_library_error(
+            lambda: lib.ClusterConfFacade.from_string("<cluster>"),
+            (
+                severity.ERROR,
+                report_codes.CLUSTER_CONF_LOAD_ERROR_INVALID_FORMAT,
+                {}
+            )
+        )
+
+    def test_invalid_document_error(self):
+        assert_raise_library_error(
+            lambda: lib.ClusterConfFacade.from_string("string"),
+            (
+                severity.ERROR,
+                report_codes.CLUSTER_CONF_LOAD_ERROR_INVALID_FORMAT,
+                {}
+            )
+        )
+
+
+class GetClusterNameTest(TestCase):
+    def test_success(self):
+        cfg = etree.XML('<cluster name="cluster-name"/>')
+        self.assertEqual(
+            "cluster-name",
+            lib.ClusterConfFacade(cfg).get_cluster_name()
+        )
+
+    def test_no_name(self):
+        cfg = etree.XML('<cluster/>')
+        self.assertEqual("", lib.ClusterConfFacade(cfg).get_cluster_name())
+
+    def test_not_cluster_element(self):
+        cfg = etree.XML('<not_cluster/>')
+        self.assertEqual("", lib.ClusterConfFacade(cfg).get_cluster_name())
+
+
+class GetNodesTest(TestCase):
+    def assert_equal_nodelist(self, expected_nodes, real_nodelist):
+        real_nodes = [
+            {"ring0": n.ring0, "ring1": n.ring1, "name": n.name, "id": n.id}
+            for n in real_nodelist
+        ]
+        self.assertEqual(expected_nodes, real_nodes)
+
+    def test_success(self):
+        config = outdent("""
+            <cluster>
+                <clusternodes>
+                    <clusternode name="node1" nodeid="1">
+                        <altname name="node1-altname"/>
+                    </clusternode>
+                    <clusternode name="node2" nodeid="2"/>
+                    <clusternode name="node3" nodeid="3"/>
+                </clusternodes>
+            </cluster>
+        """)
+        self.assert_equal_nodelist(
+            [
+                {
+                    "ring0": "node1",
+                    "ring1": "node1-altname",
+                    "name": None,
+                    "id": "1",
+                },
+                {
+                    "ring0": "node2",
+                    "ring1": None,
+                    "name": None,
+                    "id": "2",
+                },
+                {
+                    "ring0": "node3",
+                    "ring1": None,
+                    "name": None,
+                    "id": "3",
+                }
+            ],
+            lib.ClusterConfFacade(etree.XML(config)).get_nodes()
+        )
+
+    def test_no_nodes(self):
+        config = "<cluster/>"
+        self.assert_equal_nodelist(
+            [], lib.ClusterConfFacade(etree.XML(config)).get_nodes()
+        )
+
+    def test_missing_info(self):
+        config = outdent("""
+            <cluster>
+                <clusternodes>
+                    <clusternode nodeid="1"/>
+                    <clusternode name="node2">
+                        <altname/>
+                    </clusternode>
+                    <clusternode/>
+                </clusternodes>
+            </cluster>
+        """)
+        self.assert_equal_nodelist(
+            [
+                {
+                    "ring0": None,
+                    "ring1": None,
+                    "name": None,
+                    "id": "1",
+                },
+                {
+                    "ring0": "node2",
+                    "ring1": None,
+                    "name": None,
+                    "id": None,
+                },
+                {
+                    "ring0": None,
+                    "ring1": None,
+                    "name": None,
+                    "id": None,
+                }
+            ],
+            lib.ClusterConfFacade(etree.XML(config)).get_nodes()
+        )
diff --git a/pcs/lib/test/test_pacemaker_values.py b/pcs/lib/test/test_pacemaker_values.py
index 62b8e91..e192971 100644
--- a/pcs/lib/test/test_pacemaker_values.py
+++ b/pcs/lib/test/test_pacemaker_values.py
@@ -112,11 +112,10 @@ class ValidateIdTest(TestCase):
             lambda: lib.validate_id("", "test id"),
             (
                 severity.ERROR,
-                report_codes.INVALID_ID,
+                report_codes.EMPTY_ID,
                 {
                     "id": "",
                     "id_description": "test id",
-                    "reason": "empty",
                 }
             )
         )
@@ -126,8 +125,8 @@ class ValidateIdTest(TestCase):
         info = {
             "id": "",
             "id_description": desc,
-            "reason": "invalid first character",
             "invalid_character": "",
+            "is_first_char": True,
         }
         report = (severity.ERROR, report_codes.INVALID_ID, info)
 
@@ -192,8 +191,8 @@ class ValidateIdTest(TestCase):
         info = {
             "id": "",
             "id_description": desc,
-            "reason": "invalid character",
             "invalid_character": "",
+            "is_first_char": False,
         }
         report = (severity.ERROR, report_codes.INVALID_ID, info)
 
diff --git a/pcs/lib/test/test_resource_agent.py b/pcs/lib/test/test_resource_agent.py
new file mode 100644
index 0000000..5298415
--- /dev/null
+++ b/pcs/lib/test/test_resource_agent.py
@@ -0,0 +1,1631 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from lxml import etree
+from functools import partial
+
+from pcs.test.tools.assertions import (
+    ExtendedAssertionsMixin,
+    assert_raise_library_error,
+    assert_xml_equal,
+)
+from pcs.test.tools.misc import create_patcher
+from pcs.test.tools.pcs_unittest import TestCase, mock
+from pcs.test.tools.xml import XmlManipulation
+
+from pcs.common import report_codes
+from pcs.lib import resource_agent as lib_ra
+from pcs.lib.errors import ReportItemSeverity as severity, LibraryError
+from pcs.lib.external import CommandRunner
+
+patch_agent = create_patcher("pcs.lib.resource_agent")
+
+class ListResourceAgentsStandardsTest(TestCase):
+    def test_success_and_filter_stonith_out(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        agents = [
+            "ocf",
+            "lsb",
+            "service",
+            "systemd",
+            "nagios",
+            "stonith",
+        ]
+        # retval is number of providers found
+        mock_runner.run.return_value = (
+            "\n".join(agents) + "\n",
+            "",
+            len(agents)
+        )
+
+        self.assertEqual(
+            lib_ra.list_resource_agents_standards(mock_runner),
+            [
+                "lsb",
+                "nagios",
+                "ocf",
+                "service",
+                "systemd",
+            ]
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-standards"
+        ])
+
+
+    def test_success_filter_whitespace(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        agents = [
+            "",
+            "ocf",
+            "  lsb",
+            "service  ",
+            "systemd",
+            "  nagios  ",
+            "",
+            "stonith",
+            "",
+        ]
+        # retval is number of providers found
+        mock_runner.run.return_value = (
+            "\n".join(agents) + "\n",
+            "",
+            len(agents)
+        )
+
+        self.assertEqual(
+            lib_ra.list_resource_agents_standards(mock_runner),
+            [
+                "lsb",
+                "nagios",
+                "ocf",
+                "service",
+                "systemd",
+            ]
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-standards"
+        ])
+
+
+    def test_empty(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("", "", 0)
+
+        self.assertEqual(
+            lib_ra.list_resource_agents_standards(mock_runner),
+            []
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-standards"
+        ])
+
+
+    def test_error(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("lsb", "error", 1)
+
+        self.assertEqual(
+            lib_ra.list_resource_agents_standards(mock_runner),
+            ["lsb"]
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-standards"
+        ])
+
+
+class ListResourceAgentsOcfProvidersTest(TestCase):
+    def test_success(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        providers = [
+            "heartbeat",
+            "openstack",
+            "pacemaker",
+            "booth",
+        ]
+        # retval is number of providers found
+        mock_runner.run.return_value = (
+            "\n".join(providers) + "\n",
+            "",
+            len(providers)
+        )
+
+        self.assertEqual(
+            lib_ra.list_resource_agents_ocf_providers(mock_runner),
+            [
+                "booth",
+                "heartbeat",
+                "openstack",
+                "pacemaker",
+            ]
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-ocf-providers"
+        ])
+
+
+    def test_success_filter_whitespace(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        providers = [
+            "",
+            "heartbeat",
+            " openstack",
+            "pacemaker ",
+            " booth ",
+        ]
+        # retval is number of providers found
+        mock_runner.run.return_value = (
+            "\n".join(providers) + "\n",
+            "",
+            len(providers)
+        )
+
+        self.assertEqual(
+            lib_ra.list_resource_agents_ocf_providers(mock_runner),
+            [
+                "booth",
+                "heartbeat",
+                "openstack",
+                "pacemaker",
+            ]
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-ocf-providers"
+        ])
+
+
+    def test_empty(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("", "", 0)
+
+        self.assertEqual(
+            lib_ra.list_resource_agents_ocf_providers(mock_runner),
+            []
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-ocf-providers"
+        ])
+
+
+    def test_error(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("booth", "error", 1)
+
+        self.assertEqual(
+            lib_ra.list_resource_agents_ocf_providers(mock_runner),
+            ["booth"]
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-ocf-providers"
+        ])
+
+
+class ListResourceAgentsStandardsAndProvidersTest(TestCase):
+    def test_success(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.side_effect = [
+            (
+                "\n".join([
+                    "ocf",
+                    "lsb",
+                    "service",
+                    "systemd",
+                    "nagios",
+                    "stonith",
+                    "",
+                ]),
+                "",
+                0
+            ),
+            (
+                "\n".join([
+                    "heartbeat",
+                    "openstack",
+                    "pacemaker",
+                    "booth",
+                    "",
+                ]),
+                "",
+                0
+            ),
+        ]
+
+        self.assertEqual(
+            lib_ra.list_resource_agents_standards_and_providers(mock_runner),
+            [
+                "lsb",
+                "nagios",
+                "ocf:booth",
+                "ocf:heartbeat",
+                "ocf:openstack",
+                "ocf:pacemaker",
+                "service",
+                "systemd",
+            ]
+        )
+
+        self.assertEqual(2, len(mock_runner.run.mock_calls))
+        mock_runner.run.assert_has_calls([
+            mock.call(["/usr/sbin/crm_resource", "--list-standards"]),
+            mock.call(["/usr/sbin/crm_resource", "--list-ocf-providers"]),
+        ])
+
+
+class ListResourceAgentsTest(TestCase):
+    def test_success_standard(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (
+            "\n".join([
+                "docker",
+                "Dummy",
+                "dhcpd",
+                "Dummy",
+                "ethmonitor",
+                "",
+            ]),
+            "",
+            0
+        )
+
+        self.assertEqual(
+            lib_ra.list_resource_agents(mock_runner, "ocf"),
+            [
+                "dhcpd",
+                "docker",
+                "Dummy",
+                "Dummy",
+                "ethmonitor",
+            ]
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-agents", "ocf"
+        ])
+
+
+    def test_success_standard_provider(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (
+            "\n".join([
+                "ping",
+                "SystemHealth",
+                "SysInfo",
+                "HealthCPU",
+                "Dummy",
+                "",
+            ]),
+            "",
+            0
+        )
+
+        self.assertEqual(
+            lib_ra.list_resource_agents(mock_runner, "ocf:pacemaker"),
+            [
+                "Dummy",
+                "HealthCPU",
+                "ping",
+                "SysInfo",
+                "SystemHealth",
+            ]
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-agents", "ocf:pacemaker"
+        ])
+
+
+    def test_bad_standard(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (
+            "",
+            "No agents found for standard=nonsense, provider=*",
+            1
+        )
+
+        self.assertEqual(
+            lib_ra.list_resource_agents(mock_runner, "nonsense"),
+            []
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-agents", "nonsense"
+        ])
+
+
+class ListStonithAgentsTest(TestCase):
+    def test_success(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (
+            "\n".join([
+                "fence_xvm",
+                "fence_wti",
+                "fence_vmware_soap",
+                "fence_virt",
+                "fence_scsi",
+                "",
+            ]),
+            "",
+            0
+        )
+
+        self.assertEqual(
+            lib_ra.list_stonith_agents(mock_runner),
+            [
+                "fence_scsi",
+                "fence_virt",
+                "fence_vmware_soap",
+                "fence_wti",
+                "fence_xvm",
+            ]
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-agents", "stonith"
+        ])
+
+
+    def test_no_agents(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (
+            "",
+            "No agents found for standard=stonith provider=*",
+            1
+        )
+
+        self.assertEqual(
+            lib_ra.list_stonith_agents(mock_runner),
+            []
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-agents", "stonith"
+        ])
+
+
+    def test_filter_hidden_agents(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (
+            "\n".join([
+                "fence_na",
+                "fence_wti",
+                "fence_scsi",
+                "fence_vmware_helper",
+                "fence_nss_wrapper",
+                "fence_node",
+                "fence_vmware_soap",
+                "fence_virt",
+                "fence_pcmk",
+                "fence_sanlockd",
+                "fence_xvm",
+                "fence_ack_manual",
+                "fence_legacy",
+                "fence_check",
+                "fence_tool",
+                "fence_kdump_send",
+                "fence_virtd",
+                "",
+            ]),
+            "",
+            0
+        )
+
+        self.assertEqual(
+            lib_ra.list_stonith_agents(mock_runner),
+            [
+                "fence_scsi",
+                "fence_virt",
+                "fence_vmware_soap",
+                "fence_wti",
+                "fence_xvm",
+            ]
+        )
+
+        mock_runner.run.assert_called_once_with([
+            "/usr/sbin/crm_resource", "--list-agents", "stonith"
+        ])
+
+
+class GuessResourceAgentFullNameTest(TestCase):
+    def setUp(self):
+        self.mock_runner_side_effect = [
+            # list standards
+            ("ocf\n", "", 0),
+            # list providers
+            ("heartbeat\npacemaker\n", "", 0),
+            # list agents for standard-provider pairs
+            ("Delay\nDummy\n", "", 0),
+            ("Dummy\nStateful\n", "", 0),
+        ]
+
+
+    def test_one_agent_list(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.side_effect = (
+            self.mock_runner_side_effect
+            +
+            [
+                ("<resource-agent />", "", 0)
+            ]
+        )
+
+        self.assertEqual(
+            [
+                agent.get_name() for agent in
+                lib_ra.guess_resource_agent_full_name(mock_runner, "delay")
+            ],
+            ["ocf:heartbeat:Delay"]
+        )
+
+
+    def test_one_agent_exception(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.side_effect = (
+            self.mock_runner_side_effect
+            +
+            [
+                ("<resource-agent />", "", 0),
+            ]
+        )
+
+        self.assertEqual(
+            lib_ra.guess_exactly_one_resource_agent_full_name(
+                mock_runner,
+                "delay"
+            ).get_name(),
+            "ocf:heartbeat:Delay"
+        )
+
+
+    def test_two_agents_list(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.side_effect = (
+            self.mock_runner_side_effect
+            +
+            [
+                ("<resource-agent />", "", 0),
+                ("<resource-agent />", "", 0),
+            ]
+        )
+
+        self.assertEqual(
+            [
+                agent.get_name() for agent in
+                lib_ra.guess_resource_agent_full_name(mock_runner, "dummy")
+            ],
+            ["ocf:heartbeat:Dummy", "ocf:pacemaker:Dummy"]
+        )
+
+
+    def test_two_agents_one_valid_list(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.side_effect = (
+            self.mock_runner_side_effect
+            +
+            [
+                ("<resource-agent />", "", 0),
+                ("invalid metadata", "", 0),
+            ]
+        )
+
+        self.assertEqual(
+            [
+                agent.get_name() for agent in
+                lib_ra.guess_resource_agent_full_name(mock_runner, "dummy")
+            ],
+            ["ocf:heartbeat:Dummy"]
+        )
+
+
+    def test_two_agents_exception(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.side_effect = (
+            self.mock_runner_side_effect
+            +
+            [
+                ("<resource-agent />", "", 0),
+                ("<resource-agent />", "", 0),
+            ]
+        )
+
+        assert_raise_library_error(
+            lambda: lib_ra.guess_exactly_one_resource_agent_full_name(
+                mock_runner,
+                "dummy"
+            ),
+            (
+                severity.ERROR,
+                report_codes.AGENT_NAME_GUESS_FOUND_MORE_THAN_ONE,
+                {
+                    "agent": "dummy",
+                    "possible_agents": [
+                        "ocf:heartbeat:Dummy",
+                        "ocf:pacemaker:Dummy"
+                    ],
+                }
+            ),
+        )
+
+
+    def test_no_agents_list(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.side_effect = self.mock_runner_side_effect
+
+        self.assertEqual(
+            lib_ra.guess_resource_agent_full_name(mock_runner, "missing"),
+            []
+        )
+
+
+    def test_no_agents_exception(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.side_effect = self.mock_runner_side_effect
+
+        assert_raise_library_error(
+            lambda: lib_ra.guess_exactly_one_resource_agent_full_name(
+                mock_runner,
+                "missing"
+            ),
+            (
+                severity.ERROR,
+                report_codes.AGENT_NAME_GUESS_FOUND_NONE,
+                {
+                    "agent": "missing",
+                }
+            ),
+        )
+
+
+    def test_no_valids_agent_list(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.side_effect = (
+            self.mock_runner_side_effect
+            +
+            [
+                ("invalid metadata", "", 0),
+            ]
+        )
+
+        self.assertEqual(
+            lib_ra.guess_resource_agent_full_name(mock_runner, "Delay"),
+            []
+        )
+
+
+ at mock.patch.object(lib_ra.Agent, "_get_metadata")
+class AgentMetadataGetShortdescTest(TestCase):
+    def setUp(self):
+        self.agent = lib_ra.Agent(
+            mock.MagicMock(spec_set=CommandRunner)
+        )
+
+
+    def test_no_desc(self, mock_metadata):
+        xml = '<resource-agent />'
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_shortdesc(),
+            ""
+        )
+
+
+    def test_shortdesc_attribute(self, mock_metadata):
+        xml = '<resource-agent shortdesc="short description" />'
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_shortdesc(),
+            "short description"
+        )
+
+
+    def test_shortdesc_element(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <shortdesc>  short \n description  </shortdesc>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_shortdesc(),
+            "short \n description"
+        )
+
+
+ at mock.patch.object(lib_ra.Agent, "_get_metadata")
+class AgentMetadataGetLongdescTest(TestCase):
+    def setUp(self):
+        self.agent = lib_ra.Agent(
+            mock.MagicMock(spec_set=CommandRunner)
+        )
+
+
+    def test_no_desc(self, mock_metadata):
+        xml = '<resource-agent />'
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_longdesc(),
+            ""
+        )
+
+
+    def test_longesc_element(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <longdesc>  long \n description  </longdesc>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_longdesc(),
+            "long \n description"
+        )
+
+
+ at mock.patch.object(lib_ra.Agent, "_get_metadata")
+class AgentMetadataGetParametersTest(TestCase):
+    def setUp(self):
+        self.agent = lib_ra.Agent(
+            mock.MagicMock(spec_set=CommandRunner)
+        )
+
+
+    def test_no_parameters(self, mock_metadata):
+        xml = """
+            <resource-agent>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_parameters(),
+            []
+        )
+
+
+    def test_empty_parameters(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <parameters />
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_parameters(),
+            []
+        )
+
+
+    def test_empty_parameter(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <parameters>
+                    <parameter />
+                </parameters>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_parameters(),
+            [
+                {
+                    "name": "",
+                    "longdesc": "",
+                    "shortdesc": "",
+                    "type": "string",
+                    "required": False,
+                    "default": None,
+                    "advanced": False,
+                }
+            ]
+        )
+
+    def test_all_data_and_minimal_data(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <parameters>
+                    <parameter name="test_param" required="1">
+                        <longdesc>
+                            Long description
+                        </longdesc>
+                        <shortdesc>short description</shortdesc>
+                        <content type="test_type" default="default_value" />
+                    </parameter>
+                    <parameter name="another parameter"/>
+                </parameters>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_parameters(),
+            [
+                {
+                    "name": "test_param",
+                    "longdesc": "Long description",
+                    "shortdesc": "short description",
+                    "type": "test_type",
+                    "required": True,
+                    "default": "default_value",
+                    "advanced": False,
+                },
+                {
+                    "name": "another parameter",
+                    "longdesc": "",
+                    "shortdesc": "",
+                    "type": "string",
+                    "required": False,
+                    "default": None,
+                    "advanced": False,
+                }
+            ]
+        )
+
+
+ at mock.patch.object(lib_ra.Agent, "_get_metadata")
+class AgentMetadataGetActionsTest(TestCase):
+    def setUp(self):
+        self.agent = lib_ra.Agent(
+            mock.MagicMock(spec_set=CommandRunner)
+        )
+
+
+    def test_no_actions(self, mock_metadata):
+        xml = """
+            <resource-agent>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_actions(),
+            []
+        )
+
+
+    def test_empty_actions(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <actions />
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_actions(),
+            []
+        )
+
+
+    def test_empty_action(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <actions>
+                    <action />
+                </actions>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_actions(),
+            [{}]
+        )
+
+
+    def test_more_actions(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <actions>
+                    <action name="on" automatic="0"/>
+                    <action name="off" />
+                    <action name="reboot" />
+                    <action name="status" />
+                </actions>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_actions(),
+            [
+                {
+                    "name": "on",
+                    "automatic": "0"
+                },
+                {"name": "off"},
+                {"name": "reboot"},
+                {"name": "status"}
+            ]
+        )
+
+
+ at mock.patch.object(lib_ra.Agent, "_get_metadata")
+ at mock.patch.object(lib_ra.Agent, "get_name", lambda self: "agent-name")
+class AgentMetadataGetInfoTest(TestCase):
+    def setUp(self):
+        self.agent = lib_ra.Agent(
+            mock.MagicMock(spec_set=CommandRunner)
+        )
+        self.metadata = etree.XML("""
+            <resource-agent>
+                <shortdesc>short description</shortdesc>
+                <longdesc>long description</longdesc>
+                <parameters>
+                    <parameter name="test_param" required="1">
+                        <longdesc>
+                            Long description
+                        </longdesc>
+                        <shortdesc>short description</shortdesc>
+                        <content type="test_type" default="default_value" />
+                    </parameter>
+                    <parameter name="another parameter"/>
+                </parameters>
+                <actions>
+                    <action name="on" automatic="0"/>
+                    <action name="off" />
+                </actions>
+            </resource-agent>
+        """)
+
+
+    def test_name_info(self, mock_metadata):
+        mock_metadata.return_value = self.metadata
+        self.assertEqual(
+            self.agent.get_name_info(),
+            {
+                "name": "agent-name",
+                "shortdesc": "",
+                "longdesc": "",
+                "parameters": [],
+                "actions": [],
+            }
+        )
+
+
+    def test_description_info(self, mock_metadata):
+        mock_metadata.return_value = self.metadata
+        self.assertEqual(
+            self.agent.get_description_info(),
+            {
+                "name": "agent-name",
+                "shortdesc": "short description",
+                "longdesc": "long description",
+                "parameters": [],
+                "actions": [],
+            }
+        )
+
+
+    def test_full_info(self, mock_metadata):
+        mock_metadata.return_value = self.metadata
+        self.assertEqual(
+            self.agent.get_full_info(),
+            {
+                "name": "agent-name",
+                "shortdesc": "short description",
+                "longdesc": "long description",
+                "parameters": [
+                    {
+                        "name": "test_param",
+                        "longdesc": "Long description",
+                        "shortdesc": "short description",
+                        "type": "test_type",
+                        "required": True,
+                        "default": "default_value",
+                        "advanced": False,
+                    },
+                    {
+                        "name": "another parameter",
+                        "longdesc": "",
+                        "shortdesc": "",
+                        "type": "string",
+                        "required": False,
+                        "default": None,
+                        "advanced": False,
+                    }
+                ],
+                "actions": [
+                    {
+                        "name": "on",
+                        "automatic": "0"
+                    },
+                    {"name": "off"},
+                ],
+            }
+        )
+
+
+ at mock.patch.object(lib_ra.Agent, "_get_metadata")
+class AgentMetadataValidateParametersValuesTest(TestCase):
+    def setUp(self):
+        self.agent = lib_ra.Agent(
+            mock.MagicMock(spec_set=CommandRunner)
+        )
+        self.metadata = etree.XML("""
+            <resource-agent>
+                <parameters>
+                    <parameter name="test_param" required="0">
+                        <longdesc>Long description</longdesc>
+                        <shortdesc>short description</shortdesc>
+                        <content type="string" default="default_value" />
+                    </parameter>
+                    <parameter name="required_param" required="1">
+                        <content type="boolean" />
+                    </parameter>
+                    <parameter name="another_required_param" required="1">
+                        <content type="string" />
+                    </parameter>
+                </parameters>
+            </resource-agent>
+        """)
+
+
+    def test_all_required(self, mock_metadata):
+        mock_metadata.return_value = self.metadata
+        self.assertEqual(
+            self.agent.validate_parameters_values({
+                "another_required_param": "value1",
+                "required_param": "value2",
+            }),
+            ([], [])
+        )
+
+
+    def test_all_required_and_optional(self, mock_metadata):
+        mock_metadata.return_value = self.metadata
+        self.assertEqual(
+            self.agent.validate_parameters_values({
+                "another_required_param": "value1",
+                "required_param": "value2",
+                "test_param": "value3",
+            }),
+            ([], [])
+        )
+
+
+    def test_all_required_and_invalid(self, mock_metadata):
+        mock_metadata.return_value = self.metadata
+        self.assertEqual(
+            self.agent.validate_parameters_values({
+                "another_required_param": "value1",
+                "required_param": "value2",
+                "invalid_param": "value3",
+            }),
+            (["invalid_param"], [])
+        )
+
+
+    def test_missing_required(self, mock_metadata):
+        mock_metadata.return_value = self.metadata
+        self.assertEqual(
+            self.agent.validate_parameters_values({
+            }),
+            ([], ["required_param", "another_required_param"])
+        )
+
+
+    def test_missing_required_and_invalid(self, mock_metadata):
+        mock_metadata.return_value = self.metadata
+        self.assertEqual(
+            self.agent.validate_parameters_values({
+                "another_required_param": "value1",
+                "invalid_param": "value3",
+            }),
+            (["invalid_param"], ["required_param"])
+        )
+
+
+class StonithdMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        self.agent = lib_ra.StonithdMetadata(self.mock_runner)
+
+
+    def test_success(self):
+        metadata = """
+            <resource-agent>
+                <shortdesc>stonithd test metadata</shortdesc>
+            </resource-agent>
+        """
+        self.mock_runner.run.return_value = (metadata, "", 0)
+
+        assert_xml_equal(
+            str(XmlManipulation(self.agent._get_metadata())),
+            metadata
+        )
+
+        self.mock_runner.run.assert_called_once_with(
+            ["/usr/libexec/pacemaker/stonithd", "metadata"]
+        )
+
+
+    def test_failed_to_get_xml(self):
+        self.mock_runner.run.return_value = ("", "some error", 1)
+
+        self.assert_raises(
+            lib_ra.UnableToGetAgentMetadata,
+            self.agent._get_metadata,
+            {
+                "agent": "stonithd",
+                "message": "some error",
+            }
+        )
+
+        self.mock_runner.run.assert_called_once_with(
+            ["/usr/libexec/pacemaker/stonithd", "metadata"]
+        )
+
+
+    def test_invalid_xml(self):
+        self.mock_runner.run.return_value = ("some garbage", "", 0)
+
+        self.assert_raises(
+            lib_ra.UnableToGetAgentMetadata,
+            self.agent._get_metadata,
+            {
+                "agent": "stonithd",
+                "message": "Start tag expected, '<' not found, line 1, column 1",
+            }
+        )
+
+        self.mock_runner.run.assert_called_once_with(
+            ["/usr/libexec/pacemaker/stonithd", "metadata"]
+        )
+
+
+ at mock.patch.object(lib_ra.Agent, "_get_metadata")
+class StonithdMetadataGetParametersTest(TestCase):
+    def setUp(self):
+        self.agent = lib_ra.StonithdMetadata(
+            mock.MagicMock(spec_set=CommandRunner)
+        )
+
+
+    def test_success(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <parameters>
+                    <parameter name="test_param" required="0">
+                        <longdesc>
+                             Long description
+                        </longdesc>
+                        <shortdesc>
+                             Advanced use only: short description
+                        </shortdesc>
+                        <content type="test_type" default="default_value" />
+                    </parameter>
+                    <parameter name="another parameter"/>
+                </parameters>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_parameters(),
+            [
+                {
+                    "name": "test_param",
+                    "longdesc":
+                        "Advanced use only: short description\nLong "
+                        "description",
+                    "shortdesc": "Advanced use only: short description",
+                    "type": "test_type",
+                    "required": False,
+                    "default": "default_value",
+                    "advanced": True
+                },
+                {
+                    "name": "another parameter",
+                    "longdesc": "",
+                    "shortdesc": "",
+                    "type": "string",
+                    "required": False,
+                    "default": None,
+                    "advanced": False
+                }
+            ]
+        )
+
+
+class CrmAgentMetadataGetNameTest(TestCase, ExtendedAssertionsMixin):
+    def test_success(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        agent_name = "ocf:pacemaker:Dummy"
+        agent = lib_ra.CrmAgent(mock_runner, agent_name)
+
+        self.assertEqual(agent.get_name(), agent_name)
+
+
+class CrmAgentMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        self.agent_name = "ocf:pacemaker:Dummy"
+        self.agent = lib_ra.CrmAgent(self.mock_runner, self.agent_name)
+
+
+    def test_success(self):
+        metadata = """
+            <resource-agent>
+                <shortdesc>crm agent test metadata</shortdesc>
+            </resource-agent>
+        """
+        self.mock_runner.run.return_value = (metadata, "", 0)
+
+        assert_xml_equal(
+            str(XmlManipulation(self.agent._get_metadata())),
+            metadata
+        )
+
+        self.mock_runner.run.assert_called_once_with(
+            ["/usr/sbin/crm_resource", "--show-metadata", self.agent_name],
+             env_extend={
+                 "PATH": "/usr/sbin/:/bin/:/usr/bin/",
+             }
+        )
+
+
+    def test_failed_to_get_xml(self):
+        self.mock_runner.run.return_value = ("", "some error", 1)
+
+        self.assert_raises(
+            lib_ra.UnableToGetAgentMetadata,
+            self.agent._get_metadata,
+            {
+                "agent": self.agent_name,
+                "message": "some error",
+            }
+        )
+
+        self.mock_runner.run.assert_called_once_with(
+            ["/usr/sbin/crm_resource", "--show-metadata", self.agent_name],
+             env_extend={
+                 "PATH": "/usr/sbin/:/bin/:/usr/bin/",
+             }
+        )
+
+
+    def test_invalid_xml(self):
+        self.mock_runner.run.return_value = ("some garbage", "", 0)
+
+        self.assert_raises(
+            lib_ra.UnableToGetAgentMetadata,
+            self.agent._get_metadata,
+            {
+                "agent": self.agent_name,
+                "message": "Start tag expected, '<' not found, line 1, column 1",
+            }
+        )
+
+        self.mock_runner.run.assert_called_once_with(
+            ["/usr/sbin/crm_resource", "--show-metadata", self.agent_name],
+             env_extend={
+                 "PATH": "/usr/sbin/:/bin/:/usr/bin/",
+             }
+        )
+
+
+class CrmAgentMetadataIsValidAgentTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        self.agent_name = "ocf:pacemaker:Dummy"
+        self.agent = lib_ra.CrmAgent(self.mock_runner, self.agent_name)
+
+
+    def test_success(self):
+        metadata = """
+            <resource-agent>
+                <shortdesc>crm agent test metadata</shortdesc>
+            </resource-agent>
+        """
+        self.mock_runner.run.return_value = (metadata, "", 0)
+
+        self.assertTrue(self.agent.is_valid_metadata())
+
+
+    def test_fail(self):
+        self.mock_runner.run.return_value = ("", "", 1)
+
+        self.assertFalse(self.agent.is_valid_metadata())
+
+
+class StonithAgentMetadataGetNameTest(TestCase, ExtendedAssertionsMixin):
+    def test_success(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        agent_name = "fence_dummy"
+        agent = lib_ra.StonithAgent(mock_runner, agent_name)
+
+        self.assertEqual(agent.get_name(), agent_name)
+
+
+class StonithAgentMetadataGetMetadataTest(TestCase, ExtendedAssertionsMixin):
+    # Only test that correct name is going to crm_resource. Everything else is
+    # covered by the parent class and therefore tested in its test.
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        self.agent_name = "fence_dummy"
+        self.agent = lib_ra.StonithAgent(
+            self.mock_runner,
+            self.agent_name
+        )
+
+
+    def tearDown(self):
+        lib_ra.StonithAgent._stonithd_metadata = None
+
+
+    def test_success(self):
+        metadata = """
+            <resource-agent>
+                <shortdesc>crm agent test metadata</shortdesc>
+            </resource-agent>
+        """
+        self.mock_runner.run.return_value = (metadata, "", 0)
+
+        assert_xml_equal(
+            str(XmlManipulation(self.agent._get_metadata())),
+            metadata
+        )
+
+        self.mock_runner.run.assert_called_once_with(
+            [
+                "/usr/sbin/crm_resource",
+                "--show-metadata",
+                "stonith:{0}".format(self.agent_name)
+            ],
+             env_extend={
+                 "PATH": "/usr/sbin/:/bin/:/usr/bin/",
+             }
+        )
+
+
+ at mock.patch.object(lib_ra.Agent, "_get_metadata")
+class StonithAgentMetadataGetActionsTest(TestCase):
+    def setUp(self):
+        self.agent = lib_ra.StonithAgent(
+            mock.MagicMock(spec_set=CommandRunner),
+            "fence_dummy"
+        )
+
+
+    def tearDown(self):
+        lib_ra.StonithAgent._stonithd_metadata = None
+
+
+    def test_more_actions(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <actions>
+                    <action name="on" automatic="0"/>
+                    <action name="off" />
+                    <action name="reboot" />
+                    <action name="status" />
+                </actions>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertEqual(
+            self.agent.get_actions(),
+            []
+        )
+
+
+class StonithAgentMetadataGetParametersTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        self.agent_name = "fence_dummy"
+        self.agent = lib_ra.StonithAgent(
+            self.mock_runner,
+            self.agent_name
+        )
+
+
+    def tearDown(self):
+        lib_ra.StonithAgent._stonithd_metadata = None
+
+
+    def test_success(self):
+        metadata = """
+            <resource-agent>
+                <shortdesc>crm agent test metadata</shortdesc>
+                <parameters>
+                    <parameter name="debug"/>
+                    <parameter name="valid_param"/>
+                    <parameter name="verbose"/>
+                    <parameter name="help"/>
+                    <parameter name="action" required="1">
+                        <shortdesc>Fencing Action</shortdesc>
+                    </parameter>
+                    <parameter name="another_param"/>
+                    <parameter name="version"/>
+                </parameters>
+            </resource-agent>
+        """
+        stonithd_metadata = """
+            <resource-agent>
+                <parameters>
+                    <parameter name="stonithd_param"/>
+                </parameters>
+            </resource-agent>
+        """
+        self.mock_runner.run.side_effect = [
+            (metadata, "", 0),
+            (stonithd_metadata, "", 0),
+        ]
+
+        self.assertEqual(
+            self.agent.get_parameters(),
+            [
+                {
+                    "name": "valid_param",
+                    "longdesc": "",
+                    "shortdesc": "",
+                    "type": "string",
+                    "required": False,
+                    "default": None,
+                    "advanced": False
+                },
+                {
+                    "name": "action",
+                    "longdesc": "",
+                    "shortdesc":
+                        "Fencing Action\nWARNING: specifying 'action' is"
+                        " deprecated and not necessary with current Pacemaker"
+                        " versions."
+                    ,
+                    "type": "string",
+                    "required": False,
+                    "default": None,
+                    "advanced": False
+                },
+                {
+                    "name": "another_param",
+                    "longdesc": "",
+                    "shortdesc": "",
+                    "type": "string",
+                    "required": False,
+                    "default": None,
+                    "advanced": False
+                },
+                {
+                    "name": "stonithd_param",
+                    "longdesc": "",
+                    "shortdesc": "",
+                    "type": "string",
+                    "required": False,
+                    "default": None,
+                    "advanced": False
+                },
+            ]
+        )
+
+        self.assertEqual(2, len(self.mock_runner.run.mock_calls))
+        self.mock_runner.run.assert_has_calls([
+            mock.call(
+                [
+                    "/usr/sbin/crm_resource",
+                    "--show-metadata",
+                    "stonith:{0}".format(self.agent_name)
+                ],
+                 env_extend={
+                     "PATH": "/usr/sbin/:/bin/:/usr/bin/",
+                 }
+            ),
+            mock.call(
+                ["/usr/libexec/pacemaker/stonithd", "metadata"]
+            ),
+        ])
+
+
+ at mock.patch.object(lib_ra.Agent, "_get_metadata")
+class StonithAgentMetadataGetProvidesUnfencingTest(TestCase):
+    def setUp(self):
+        self.agent = lib_ra.StonithAgent(
+            mock.MagicMock(spec_set=CommandRunner),
+            "fence_dummy"
+        )
+
+
+    def tearDown(self):
+        lib_ra.StonithAgent._stonithd_metadata = None
+
+
+    def test_true(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <actions>
+                    <action name="off" />
+                    <action name="on" on_target="1" automatic="1"/>
+                    <action name="reboot" />
+                    <action name="status" />
+                </actions>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertTrue(self.agent.get_provides_unfencing())
+
+
+    def test_no_action_on(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <actions>
+                    <action name="off" />
+                    <action name="reboot" />
+                    <action name="status" />
+                </actions>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertFalse(self.agent.get_provides_unfencing())
+
+
+    def test_no_tagret(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <actions>
+                    <action name="off" />
+                    <action name="on" automatic="1"/>
+                    <action name="reboot" />
+                    <action name="status" />
+                </actions>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertFalse(self.agent.get_provides_unfencing())
+
+
+    def test_no_automatic(self, mock_metadata):
+        xml = """
+            <resource-agent>
+                <actions>
+                    <action name="off" />
+                    <action name="on" on_target="1" />
+                    <action name="reboot" />
+                    <action name="status" />
+                </actions>
+            </resource-agent>
+        """
+        mock_metadata.return_value = etree.XML(xml)
+        self.assertFalse(self.agent.get_provides_unfencing())
+
+class ResourceAgentTest(TestCase):
+    def test_raises_on_invalid_name(self):
+        self.assertRaises(
+            lib_ra.InvalidResourceAgentName,
+            lambda: lib_ra.ResourceAgent(mock.MagicMock(), "invalid_name")
+        )
+
+    def test_does_not_raise_on_valid_name(self):
+        lib_ra.ResourceAgent(mock.MagicMock(), "formal:valid:name")
+
+class FindResourceAgentByNameTest(TestCase):
+    def setUp(self):
+        self.report_processor = mock.MagicMock()
+        self.runner = mock.MagicMock()
+        self.run = partial(
+            lib_ra.find_valid_resource_agent_by_name,
+            self.report_processor,
+            self.runner,
+        )
+
+    @patch_agent("reports.agent_name_guessed")
+    @patch_agent("guess_exactly_one_resource_agent_full_name")
+    def test_returns_guessed_agent(self, mock_guess, mock_report):
+        #setup
+        name = "Delay"
+        guessed_name =  "ocf:heartbeat:Delay"
+        report = "AGENT_NAME_GUESSED"
+
+        agent = mock.MagicMock(get_name=mock.Mock(return_value=guessed_name))
+        mock_guess.return_value = agent
+        mock_report.return_value = report
+
+        #test
+        self.assertEqual(agent, self.run(name))
+        mock_guess.assert_called_once_with(self.runner, name)
+        self.report_processor.process.assert_called_once_with(report)
+        mock_report.assert_called_once_with(name, guessed_name)
+
+    @patch_agent("ResourceAgent")
+    def test_returns_real_agent_when_is_there(self, ResourceAgent):
+        #setup
+        name = "ocf:heartbeat:Delay"
+
+        agent = mock.MagicMock()
+        agent.validate_metadata = mock.Mock(return_value=agent)
+        ResourceAgent.return_value = agent
+
+        #test
+        self.assertEqual(agent, self.run(name))
+        ResourceAgent.assert_called_once_with(self.runner, name)
+
+    @patch_agent("resource_agent_error_to_report_item")
+    @patch_agent("AbsentResourceAgent")
+    @patch_agent("ResourceAgent")
+    def test_returns_absent_agent_on_metadata_load_fail(
+        self, ResourceAgent, AbsentResourceAgent, error_to_report_item
+    ):
+        #setup
+        name = "ocf:heartbeat:Some"
+        report = "UNABLE_TO_GET_AGENT_METADATA"
+        e = lib_ra.UnableToGetAgentMetadata(name, "metadata missing")
+        agent = 'absent agent'
+
+        ResourceAgent.side_effect = e
+        error_to_report_item.return_value = report
+        AbsentResourceAgent.return_value = agent
+
+        #test
+        self.assertEqual(agent, self.run(name, allowed_absent=True))
+        ResourceAgent.assert_called_once_with(self.runner, name)
+        AbsentResourceAgent.assert_called_once_with(self.runner, name)
+        error_to_report_item.assert_called_once_with(
+            e, severity=severity.WARNING, forceable=True
+        )
+        self.report_processor.process.assert_called_once_with(report)
+
+    @patch_agent("resource_agent_error_to_report_item")
+    @patch_agent("ResourceAgent")
+    def test_raises_on_metatdata_load_fail_disallowed_absent(
+        self, ResourceAgent, error_to_report_item
+    ):
+        name = "ocf:heartbeat:Some"
+        report = "UNABLE_TO_GET_AGENT_METADATA"
+        e = lib_ra.UnableToGetAgentMetadata(name, "metadata missing")
+
+        ResourceAgent.side_effect = e
+        error_to_report_item.return_value = report
+
+        with self.assertRaises(LibraryError) as context_manager:
+            self.run(name)
+
+        self.assertEqual(report, context_manager.exception.args[0])
+        ResourceAgent.assert_called_once_with(self.runner, name)
+        error_to_report_item.assert_called_once_with(e)
+
+    @patch_agent("resource_agent_error_to_report_item")
+    @patch_agent("ResourceAgent")
+    def test_raises_on_invalid_name(self, ResourceAgent, error_to_report_item):
+        name = "ocf:heartbeat:Something:else"
+        report = "INVALID_RESOURCE_AGENT_NAME"
+        e = lib_ra.InvalidResourceAgentName(name, "invalid agent name")
+
+        ResourceAgent.side_effect = e
+        error_to_report_item.return_value = report
+
+        with self.assertRaises(LibraryError) as context_manager:
+            self.run(name)
+
+        self.assertEqual(report, context_manager.exception.args[0])
+        ResourceAgent.assert_called_once_with(self.runner, name)
+        error_to_report_item.assert_called_once_with(e)
+
+class AbsentResourceAgentTest(TestCase):
+    @mock.patch.object(lib_ra.CrmAgent, "_load_metadata")
+    def test_behaves_like_a_proper_agent(self, load_metadata):
+        name =  "ocf:heartbeat:Absent"
+        runner = mock.MagicMock(spec_set=CommandRunner)
+        load_metadata.return_value = "<resource-agent/>"
+
+        agent = lib_ra.ResourceAgent(runner, name)
+        absent = lib_ra.AbsentResourceAgent(runner, name)
+
+        #metadata are valid
+        absent.validate_metadata()
+        self.assertTrue(absent.is_valid_metadata())
+
+        self.assertEqual(agent.get_name(), absent.get_name())
+        self.assertEqual(
+            agent.get_description_info(), absent.get_description_info()
+        )
+        self.assertEqual(agent.get_full_info(), absent.get_full_info())
+        self.assertEqual(agent.get_shortdesc(), absent.get_shortdesc())
+        self.assertEqual(agent.get_longdesc(), absent.get_longdesc())
+        self.assertEqual(agent.get_parameters(), absent.get_parameters())
+        self.assertEqual(agent.get_actions(), absent.get_actions())
+        self.assertEqual(([], []), absent.validate_parameters_values({
+            "whatever": "anything"
+        }))
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 88c4151..0cf1be4 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "September 2016" "pcs 0.9.154" "System Administration Utilities"
+.TH PCS "8" "November 2016" "pcs 0.9.155" "System Administration Utilities"
 .SH NAME
 pcs \- pacemaker/corosync configuration system
 .SH SYNOPSIS
@@ -67,13 +67,13 @@ Manage pacemaker alerts.
 [show [<resource id>] | \fB\-\-full\fR | \fB\-\-groups\fR | \fB\-\-hide\-inactive\fR]
 Show all currently configured resources or if a resource is specified show the options for the configured resource.  If \fB\-\-full\fR is specified, all configured resource options will be displayed.  If \fB\-\-groups\fR is specified, only show groups (and their resources).  If \fB\-\-hide\-inactive\fR is specified, only show active resources.
 .TP
-list [<standard|provider|type>] [\fB\-\-nodesc\fR]
-Show list of all available resources, optionally filtered by specified type, standard or provider. If \fB\-\-nodesc\fR is used then descriptions of resources are not printed.
+list [filter] [\fB\-\-nodesc\fR]
+Show list of all available resource agents (if filter is provided then only resource agents matching the filter will be shown). If \fB\-\-nodesc\fR is used then descriptions of resource agents are not printed.
 .TP
-describe <standard:provider:type|type>
+describe [<standard>:[<provider>:]]<type>
 Show options for the specified resource.
 .TP
-create <resource id> <standard:provider:type|type> [resource options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...] [\fB\-\-clone\fR <clone options> | \fB\-\-master\fR <master options> | \fB\-\-group\fR <group id> [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>]] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
+create <resource id> [<standard>:[<provider>:]]<type> [resource options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...] [\fB\-\-clone\fR <clone options> | \fB\-\-master\fR <master options> | \fB\-\-group\fR <group id> [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>]] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
 Create specified resource.  If \fB\-\-clone\fR is used a clone resource is created.  If \fB\-\-master\fR is specified a master/slave resource is created.  If \fB\-\-group\fR is specified the resource is added to the group named.  You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group.  If \fB\-\-disabled\fR is specified the resource is not started automatically.  If \fB\-\-wait\fR is specifie [...]
 
 Example: Create a new resource called 'VirtualIP' with IP address 192.168.0.99, netmask of 32, monitored everything 30 seconds, on eth2: pcs resource create VirtualIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 nic=eth2 op monitor interval=30s
@@ -345,6 +345,8 @@ confirm <node> [\fB\-\-force\fR]
 Confirm that the host specified is currently down.  This command should \fBONLY\fR be used when the node specified has already been confirmed to be powered off and to have no access to shared resources.
 
 .B WARNING: If this node is not actually powered off or it does have access to shared resources, data corruption/cluster failure can occur. To prevent accidental running of this command, \-\-force or interactive user response is required in order to proceed.
+
+NOTE: The existence of the specified node in the cluster is not checked, so that this command can also be used with nodes not visible from the local cluster partition.
 .TP
 sbd enable [\fB\-\-watchdog\fR=<path>[@<node>]] ... [<SBD_OPTION>=<value>] ...
 Enable SBD in cluster. Default path for watchdog device is /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5), SBD_DELAY_START (default: no) and SBD_STARTMODE (default: clean).
@@ -383,19 +385,19 @@ Create a role with the id and (optional) description specified.  Each role can a
 role delete <role id>
 Delete the role specified and remove it from any users/groups it was assigned to.
 .TP
-role assign <role id> [to] <username/group>
-Assign a role to a user or group already created with 'pcs acl user/group create'.
+role assign <role id> [to] [user|group] <username/group>
+Assign a role to a user or group already created with 'pcs acl user/group create'. If there is a user and a group with the same id and it is not specified which of them should be used, the user will be prioritized. In such cases specify whether the user or the group should be used.
 .TP
-role unassign <role id> [from] <username/group>
-Remove a role from the specified user.
+role unassign <role id> [from] [user|group] <username/group>
+Remove a role from the specified user or group. If there is a user and a group with the same id and it is not specified which of them should be used, the user will be prioritized. In such cases specify whether the user or the group should be used.
 .TP
-user create <username> <role id> [<role id>]...
+user create <username> [<role id>]...
 Create an ACL for the user specified and assign roles to the user.
 .TP
 user delete <username>
 Remove the user specified (and roles assigned will be unassigned for the specified user).
 .TP
-group create <group> <role id> [<role id>]...
+group create <group> [<role id>]...
 Create an ACL for the group specified and assign roles to the group.
 .TP
 group delete <group>
@@ -731,8 +733,8 @@ Define an alert handler with specified path. Id will be automatically generated
 update <alert\-id> [path=<path>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
 Update existing alert handler with specified id.
 .TP
-remove <alert\-id>
-Remove alert handler with specified id.
+remove <alert\-id> ...
+Remove alert handlers with specified ids.
 .TP
 recipient add <alert\-id> value=<recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
 Add new recipient to specified alert handler.
@@ -740,8 +742,8 @@ Add new recipient to specified alert handler.
 recipient update <recipient\-id> [value=<recipient\-value>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
 Update existing recipient identified by it's id.
 .TP
-recipient remove <recipient\-id>
-Remove specified recipient.
+recipient remove <recipient\-id> ...
+Remove specified recipients.
 .SH EXAMPLES
 .TP
 Show all resources
diff --git a/pcs/resource.py b/pcs/resource.py
index a5bcf7c..54c77c3 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -6,7 +6,6 @@ from __future__ import (
 )
 
 import sys
-import os
 import xml.dom.minidom
 from xml.dom.minidom import getDOMImplementation
 from xml.dom.minidom import parseString
@@ -19,13 +18,11 @@ from pcs import (
     usage,
     utils,
     constraint,
-    settings,
 )
 from pcs.settings import pacemaker_wait_timeout_status as \
     PACEMAKER_WAIT_TIMEOUT_STATUS
 import pcs.lib.cib.acl as lib_acl
 import pcs.lib.pacemaker as lib_pacemaker
-from pcs.lib.external import get_systemd_services
 from pcs.cli.common.errors import CmdLineInputError
 from pcs.cli.common.parse_args import prepare_options
 from pcs.lib.errors import LibraryError
@@ -36,170 +33,157 @@ import pcs.lib.resource_agent as lib_ra
 RESOURCE_RELOCATE_CONSTRAINT_PREFIX = "pcs-relocate-"
 
 def resource_cmd(argv):
-    if len(argv) == 0:
-        argv = ["show"]
-
-    sub_cmd = argv.pop(0)
-    if (sub_cmd == "help"):
-        usage.resource(argv)
-    elif (sub_cmd == "list"):
-        resource_list_available(argv)
-    elif (sub_cmd == "describe"):
-        if len(argv) == 1:
-            resource_list_options(argv[0])
-        else:
-            usage.resource()
-            sys.exit(1)
-    elif (sub_cmd == "create"):
-        if len(argv) < 2:
-            usage.resource()
-            sys.exit(1)
-        res_id = argv.pop(0)
-        res_type = argv.pop(0)
-        ra_values, op_values, meta_values, clone_opts = parse_resource_options(
-            argv, with_clone=True
-        )
-        try:
+    if len(argv) < 1:
+        sub_cmd, argv_next = "show", []
+    else:
+        sub_cmd, argv_next = argv[0], argv[1:]
+
+    lib = utils.get_library_wrapper()
+    modifiers = utils.get_modificators()
+
+    try:
+        if sub_cmd == "help":
+            usage.resource(argv)
+        elif sub_cmd == "list":
+            resource_list_available(lib, argv_next, modifiers)
+        elif sub_cmd == "describe":
+            resource_list_options(lib, argv_next, modifiers)
+        elif sub_cmd == "create":
+            if len(argv_next) < 2:
+                usage.resource(["create"])
+                sys.exit(1)
+            res_id = argv_next.pop(0)
+            res_type = argv_next.pop(0)
+            ra_values, op_values, meta_values, clone_opts = parse_resource_options(
+                argv_next, with_clone=True
+            )
             resource_create(
                 res_id, res_type, ra_values, op_values, meta_values, clone_opts,
                 group=utils.pcs_options.get("--group", None)
             )
-        except CmdLineInputError as e:
-            utils.exit_on_cmdline_input_errror(e, "resource", 'create')
-    elif (sub_cmd == "move"):
-        resource_move(argv)
-    elif (sub_cmd == "ban"):
-        resource_move(argv,False,True)
-    elif (sub_cmd == "clear"):
-        resource_move(argv,True)
-    elif (sub_cmd == "standards"):
-        resource_standards()
-    elif (sub_cmd == "providers"):
-        resource_providers()
-    elif (sub_cmd == "agents"):
-        resource_agents(argv)
-    elif (sub_cmd == "update"):
-        if len(argv) == 0:
-            usage.resource()
-            sys.exit(1)
-        res_id = argv.pop(0)
-        try:
-            resource_update(res_id,argv)
-        except CmdLineInputError as e:
-            utils.exit_on_cmdline_input_errror(e, "resource", 'update')
-    elif (sub_cmd == "add_operation"):
-        utils.err("add_operation has been deprecated, please use 'op add'")
-    elif (sub_cmd == "remove_operation"):
-        utils.err("remove_operation has been deprecated, please use 'op remove'")
-    elif (sub_cmd == "meta"):
-        if len(argv) < 2:
-            usage.resource()
-            sys.exit(1)
-        res_id = argv.pop(0)
-        resource_meta(res_id,argv)
-    elif (sub_cmd == "delete"):
-        if len(argv) == 0:
-            usage.resource()
-            sys.exit(1)
-        res_id = argv.pop(0)
-        resource_remove(res_id)
-    elif (sub_cmd == "show"):
-        resource_show(argv)
-    elif (sub_cmd == "group"):
-        resource_group(argv)
-    elif (sub_cmd == "ungroup"):
-        resource_group(["remove"] + argv)
-    elif (sub_cmd == "clone"):
-        try:
-            resource_clone(argv)
-        except CmdLineInputError as e:
-            utils.exit_on_cmdline_input_errror(e, "resource", 'clone')
-    elif (sub_cmd == "unclone"):
-        resource_clone_master_remove(argv)
-    elif (sub_cmd == "master"):
-        try:
-            resource_master(argv)
-        except CmdLineInputError as e:
-            utils.exit_on_cmdline_input_errror(e, "resource", 'master')
-    elif (sub_cmd == "enable"):
-        resource_enable(argv)
-    elif (sub_cmd == "disable"):
-        resource_disable(argv)
-    elif (sub_cmd == "restart"):
-        resource_restart(argv)
-    elif (sub_cmd == "debug-start"):
-        resource_force_action(sub_cmd, argv)
-    elif (sub_cmd == "debug-stop"):
-        resource_force_action(sub_cmd, argv)
-    elif (sub_cmd == "debug-promote"):
-        resource_force_action(sub_cmd, argv)
-    elif (sub_cmd == "debug-demote"):
-        resource_force_action(sub_cmd, argv)
-    elif (sub_cmd == "debug-monitor"):
-        resource_force_action(sub_cmd, argv)
-    elif (sub_cmd == "manage"):
-        resource_manage(argv, True)
-    elif (sub_cmd == "unmanage"):
-        resource_manage(argv, False)
-    elif (sub_cmd == "failcount"):
-        resource_failcount(argv)
-    elif (sub_cmd == "op"):
-        if len(argv) < 1:
-            usage.resource(["op"])
-            sys.exit(1)
-        op_subcmd = argv.pop(0)
-        if op_subcmd == "defaults":
-            if len(argv) == 0:
-                show_defaults("op_defaults")
-            else:
-                set_default("op_defaults", argv)
-        elif op_subcmd == "add":
-            if len(argv) == 0:
-                usage.resource(["op"])
+        elif sub_cmd == "move":
+            resource_move(argv_next)
+        elif sub_cmd == "ban":
+            resource_move(argv_next, False, True)
+        elif sub_cmd == "clear":
+            resource_move(argv_next, True)
+        elif sub_cmd == "standards":
+            resource_standards(lib, argv_next, modifiers)
+        elif sub_cmd == "providers":
+            resource_providers(lib, argv_next, modifiers)
+        elif sub_cmd == "agents":
+            resource_agents(lib, argv_next, modifiers)
+        elif sub_cmd == "update":
+            if len(argv_next) == 0:
+                usage.resource(["update"])
                 sys.exit(1)
-            else:
-                res_id = argv.pop(0)
-                utils.replace_cib_configuration(
-                    resource_operation_add(utils.get_cib_dom(), res_id, argv)
-                )
-        elif op_subcmd in ["remove","delete"]:
-            if len(argv) == 0:
+            res_id = argv_next.pop(0)
+            resource_update(res_id, argv_next)
+        elif sub_cmd == "add_operation":
+            utils.err("add_operation has been deprecated, please use 'op add'")
+        elif sub_cmd == "remove_operation":
+            utils.err("remove_operation has been deprecated, please use 'op remove'")
+        elif sub_cmd == "meta":
+            if len(argv_next) < 2:
+                usage.resource(["meta"])
+                sys.exit(1)
+            res_id = argv_next.pop(0)
+            resource_meta(res_id, argv_next)
+        elif sub_cmd == "delete":
+            if len(argv_next) == 0:
+                usage.resource(["delete"])
+                sys.exit(1)
+            res_id = argv_next.pop(0)
+            resource_remove(res_id)
+        elif sub_cmd == "show":
+            resource_show(argv_next)
+        elif sub_cmd == "group":
+            resource_group(argv_next)
+        elif sub_cmd == "ungroup":
+            resource_group(["remove"] + argv_next)
+        elif sub_cmd == "clone":
+            resource_clone(argv_next)
+        elif sub_cmd == "unclone":
+            resource_clone_master_remove(argv_next)
+        elif sub_cmd == "master":
+            resource_master(argv_next)
+        elif sub_cmd == "enable":
+            resource_enable(argv_next)
+        elif sub_cmd == "disable":
+            resource_disable(argv_next)
+        elif sub_cmd == "restart":
+            resource_restart(argv_next)
+        elif sub_cmd == "debug-start":
+            resource_force_action(sub_cmd, argv_next)
+        elif sub_cmd == "debug-stop":
+            resource_force_action(sub_cmd, argv_next)
+        elif sub_cmd == "debug-promote":
+            resource_force_action(sub_cmd, argv_next)
+        elif sub_cmd == "debug-demote":
+            resource_force_action(sub_cmd, argv_next)
+        elif sub_cmd == "debug-monitor":
+            resource_force_action(sub_cmd, argv_next)
+        elif sub_cmd == "manage":
+            resource_manage(argv_next, True)
+        elif sub_cmd == "unmanage":
+            resource_manage(argv_next, False)
+        elif sub_cmd == "failcount":
+            resource_failcount(argv_next)
+        elif sub_cmd == "op":
+            if len(argv_next) < 1:
                 usage.resource(["op"])
                 sys.exit(1)
+            op_subcmd = argv_next.pop(0)
+            if op_subcmd == "defaults":
+                if len(argv_next) == 0:
+                    show_defaults("op_defaults")
+                else:
+                    set_default("op_defaults", argv_next)
+            elif op_subcmd == "add":
+                if len(argv_next) == 0:
+                    usage.resource(["op"])
+                    sys.exit(1)
+                else:
+                    res_id = argv_next.pop(0)
+                    utils.replace_cib_configuration(
+                        resource_operation_add(
+                            utils.get_cib_dom(), res_id, argv_next
+                        )
+                    )
+            elif op_subcmd in ["remove", "delete"]:
+                if len(argv_next) == 0:
+                    usage.resource(["op"])
+                    sys.exit(1)
+                else:
+                    res_id = argv_next.pop(0)
+                    resource_operation_remove(res_id, argv_next)
+        elif sub_cmd == "defaults":
+            if len(argv_next) == 0:
+                show_defaults("rsc_defaults")
             else:
-                res_id = argv.pop(0)
-                resource_operation_remove(res_id, argv)
-    elif (sub_cmd == "defaults"):
-        if len(argv) == 0:
-            show_defaults("rsc_defaults")
-        else:
-            set_default("rsc_defaults", argv)
-    elif (sub_cmd == "cleanup"):
-        try:
-            resource_cleanup(argv)
-        except CmdLineInputError as e:
-            utils.exit_on_cmdline_input_errror(e, "resource", 'cleanup')
-        except LibraryError as e:
-            utils.process_library_reports(e.args)
-    elif (sub_cmd == "history"):
-        resource_history(argv)
-    elif (sub_cmd == "relocate"):
-        resource_relocate(argv)
-    elif (sub_cmd == "utilization"):
-        if len(argv) == 0:
-            print_resources_utilization()
-        elif len(argv) == 1:
-            print_resource_utilization(argv.pop(0))
+                set_default("rsc_defaults", argv_next)
+        elif sub_cmd == "cleanup":
+            resource_cleanup(argv_next)
+        elif sub_cmd == "history":
+            resource_history(argv_next)
+        elif sub_cmd == "relocate":
+            resource_relocate(argv_next)
+        elif sub_cmd == "utilization":
+            if len(argv_next) == 0:
+                print_resources_utilization()
+            elif len(argv_next) == 1:
+                print_resource_utilization(argv_next.pop(0))
+            else:
+                set_resource_utilization(argv_next.pop(0), argv_next)
+        elif sub_cmd == "get_resource_agent_info":
+            get_resource_agent_info(argv_next)
         else:
-            try:
-                set_resource_utilization(argv.pop(0), argv)
-            except CmdLineInputError as e:
-                utils.exit_on_cmdline_input_errror(e, "resource", "utilization")
-    elif (sub_cmd == "get_resource_agent_info"):
-        get_resource_agent_info(argv)
-    else:
-        usage.resource()
-        sys.exit(1)
+            usage.resource()
+            sys.exit(1)
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(e, "resource", sub_cmd)
 
 def parse_resource_options(argv, with_clone=False):
     ra_values = []
@@ -243,212 +227,126 @@ def parse_resource_options(argv, with_clone=False):
         return ra_values, op_values, meta_values, clone_opts
     return ra_values, op_values, meta_values
 
-# List available resources
-# TODO make location more easily configurable
-def resource_list_available(argv):
-    def get_name_and_desc(agent_name, shortdesc):
-        sd = ""
-        if len(shortdesc) > 0:
-            sd = " - " + format_desc(
-                len(agent_name + " - "),
-                shortdesc.replace("\n", " ")
-            )
-        return agent_name + sd
 
-    ret = []
-    if len(argv) != 0:
-        filter_string = argv[0]
-    else:
-        filter_string = ""
-
-    # ocf agents
-    providers = sorted(os.listdir(settings.ocf_resources))
-    for provider in providers:
-        resources = sorted(os.listdir(os.path.join(
-            settings.ocf_resources, provider
-        )))
-        for resource in resources:
-            if resource.startswith(".") or resource == "ocf-shellfuncs":
-                continue
-            full_res_name = "ocf:" + provider + ":" + resource
-            if full_res_name.lower().count(filter_string.lower()) == 0:
-                continue
+def resource_list_available(lib, argv, modifiers):
+    if len(argv) > 1:
+        raise CmdLineInputError()
 
-            if "--nodesc" in utils.pcs_options:
-                ret.append(full_res_name)
-                continue
+    search = argv[0] if argv else None
+    agent_list = lib.resource_agent.list_agents(modifiers["describe"], search)
 
-            try:
-                metadata = lib_ra.get_resource_agent_metadata(
-                    utils.cmd_runner(), full_res_name
-                )
-                ret.append(get_name_and_desc(
-                    full_res_name,
-                    lib_ra.get_agent_desc(metadata)["shortdesc"]
-                ))
-            except (LibraryError, lib_ra.ResourceAgentLibError):
-                pass
-
-    # lsb agents
-    lsb_dir = "/etc/init.d/"
-    agents = sorted(os.listdir(lsb_dir))
-    for agent in agents:
-        if os.access(lsb_dir + agent, os.X_OK):
-            ret.append("lsb:" + agent)
-
-    # systemd agents
-    for service in get_systemd_services(utils.cmd_runner()):
-        ret.append("systemd:{0}".format(service))
-
-    # nagios metadata
-    if os.path.isdir(settings.nagios_metadata_path):
-        for metadata_file in sorted(os.listdir(settings.nagios_metadata_path)):
-            if metadata_file.startswith("."):
-                continue
-            full_res_name = "nagios:" + metadata_file
-            if full_res_name.lower().endswith(".xml"):
-                full_res_name = full_res_name[:-len(".xml")]
-            if "--nodesc" in utils.pcs_options:
-                ret.append(full_res_name)
-                continue
-            try:
-                metadata = lib_ra.get_resource_agent_metadata(
-                    utils.cmd_runner(),
-                    full_res_name
-                )
-                ret.append(get_name_and_desc(
-                    full_res_name,
-                    lib_ra.get_agent_desc(metadata)["shortdesc"]
-                ))
-            except (LibraryError, lib_ra.ResourceAgentLibError):
-                pass
-
-    # output
-    if not ret:
+    if not agent_list:
+        if search:
+            utils.err("No resource agents matching the filter.")
         utils.err(
             "No resource agents available. "
             "Do you have resource agents installed?"
         )
-    if filter_string != "":
-        found = False
-        for rline in ret:
-            if rline.lower().find(filter_string.lower()) != -1:
-                print(rline)
-                found = True
-        if not found:
-            utils.err("No resource agents matching the filter.")
-    else:
-        print("\n".join(ret))
 
+    for agent_info in agent_list:
+        name = agent_info["name"]
+        shortdesc = agent_info["shortdesc"]
+        if shortdesc:
+            print("{0} - {1}".format(
+                name,
+                _format_desc(len(name + " - "), shortdesc.replace("\n", " "))
+            ))
+        else:
+            print(name)
 
-def resource_print_options(agent_name, desc, params, actions):
-    if desc["shortdesc"]:
-        agent_name += " - " + format_desc(
-            len(agent_name + " - "), desc["shortdesc"]
-        )
-    print(agent_name)
-    if desc["longdesc"]:
-        print()
-        print(desc["longdesc"])
-
-    if len(params) > 0:
-        print()
-        print("Resource options:")
-    for param in params:
-        if param.get("advanced", False):
-            continue
-        name = param["name"]
-        if param["required"]:
-            name += " (required)"
-        desc = param["longdesc"].replace("\n", " ")
-        if not desc:
-            desc = param["shortdesc"].replace("\n", " ")
-            if not desc:
-                desc = "No description available"
-        indent = len(name) + 4
-        desc = format_desc(indent, desc)
-        print("  " + name + ": " + desc)
-
-    if actions:
-        print()
-        print("Default operations:")
-        action_lines = []
-        for action in utils.filter_default_op_from_actions(actions):
+
+def resource_list_options(lib, argv, modifiers):
+    if len(argv) != 1:
+        raise CmdLineInputError()
+    agent_name = argv[0]
+
+    print(_format_agent_description(
+        lib.resource_agent.describe_agent(agent_name)
+    ))
+
+
+def _format_agent_description(description, stonith=False):
+    output = []
+
+    if description.get("name") and description.get("shortdesc"):
+        output.append("{0} - {1}".format(
+            description["name"],
+            _format_desc(
+                len(description["name"] + " - "),
+                description["shortdesc"]
+            )
+        ))
+    elif description.get("name"):
+        output.append(description["name"])
+    elif description.get("shortdesc"):
+        output.append(description["shortdesc"])
+
+    if description.get("longdesc"):
+        output.append("")
+        output.append(description["longdesc"])
+
+    if description.get("parameters"):
+        output_params = []
+        for param in description["parameters"]:
+            if param.get("advanced", False):
+                continue
+            param_title = " ".join(filter(None, [
+                param.get("name"),
+                "(required)" if param.get("required", False) else None
+            ]))
+            param_desc = param.get("longdesc", "").replace("\n", " ")
+            if not param_desc:
+                param_desc = param.get("shortdesc", "").replace("\n", " ")
+                if not param_desc:
+                    param_desc = "No description available"
+            output_params.append("  {0}: {1}".format(
+                param_title,
+                _format_desc(len(param_title) + 4, param_desc)
+            ))
+        if output_params:
+            output.append("")
+            if stonith:
+                output.append("Stonith options:")
+            else:
+                output.append("Resource options:")
+            output.extend(output_params)
+
+    if description.get("actions"):
+        output_actions = []
+        for action in utils.filter_default_op_from_actions(
+            description["actions"]
+        ):
             parts = ["  {0}:".format(action.get("name", ""))]
             parts.extend([
                 "{0}={1}".format(name, value)
                 for name, value in sorted(action.items())
                 if name != "name"
             ])
-            action_lines.append(" ".join(parts))
-        print("\n".join(action_lines))
-
-def resource_list_options(resource):
-    runner = utils.cmd_runner()
-
-    def get_desc_params(agent_name):
-        metadata_dom = lib_ra.get_resource_agent_metadata(
-            runner, agent_name
-        )
-        desc = lib_ra.get_agent_desc(metadata_dom)
-        params = lib_ra.get_resource_agent_parameters(metadata_dom)
-        actions = lib_ra.get_agent_actions(metadata_dom)
-        return desc, params, actions
+            output_actions.append(" ".join(parts))
+        if output_actions:
+            output.append("")
+            output.append("Default operations:")
+            output.extend(output_actions)
 
-    found_resource = False
+    return "\n".join(output)
 
-    try:
-        descriptions, parameters, actions = get_desc_params(resource)
-        resource_print_options(resource, descriptions, parameters, actions)
-        return
-    except lib_ra.UnsupportedResourceAgent:
-        pass
-    except lib_ra.ResourceAgentLibError as e:
-        utils.process_library_reports(
-            [lib_ra.resource_agent_lib_error_to_report_item(e)]
-        )
-    except LibraryError as e:
-        utils.process_library_reports(e.args)
 
-    # no standard was given, let's search all ocf providers first
-    providers = sorted(os.listdir(settings.ocf_resources))
-    for provider in providers:
-        if not os.path.exists(
-            os.path.join(settings.ocf_resources, provider, resource)
-        ):
-            continue
-        try:
-            agent = "ocf:{0}:{1}".format(provider, resource)
-            descriptions, parameters, actions = get_desc_params(agent)
-            resource_print_options(agent, descriptions, parameters, actions)
-            return
-        except (LibraryError, lib_ra.ResourceAgentLibError):
-            pass
-
-    # still not found, now let's take a look at nagios plugins
-    if not found_resource:
-        try:
-            agent = "nagios:" + resource
-            descriptions, parameters, actions = get_desc_params(agent)
-            resource_print_options(agent, descriptions, parameters, actions)
-        except (LibraryError, lib_ra.ResourceAgentLibError):
-            utils.err("Unable to find resource: {0}".format(resource))
-
-# Return the string formatted with a line length of 79 and indented
-def format_desc(indent, desc):
+# Return the string wrapped to the terminal width and indented
+def _format_desc(indent, desc):
     desc = " ".join(desc.split())
     dummy_rows, columns = utils.getTerminalSize()
     columns = int(columns)
     if columns < 40:
         columns = 40
     afterindent = columns - indent
+    if afterindent < 1:
+        afterindent = columns
+
     output = ""
     first = True
-
     for line in textwrap.wrap(desc, afterindent):
         if not first:
-            for _ in range(0,indent):
-                output += " "
+            output += " " * indent
         output += line
         output += "\n"
         first = False
@@ -480,17 +378,55 @@ def resource_create(
     if not ra_id_valid:
         utils.err(ra_id_error)
 
-    dom = utils.get_cib_dom()
 
-    # If we're not using --force, try to change the case of ra_type to match any
-    # installed resources
-    if "--force" not in utils.pcs_options:
-        new_ra_type = utils.is_valid_resource(ra_type, True)
-        if new_ra_type != True and new_ra_type != False:
-            ra_type = new_ra_type
+    try:
+        if ":" in ra_type:
+            full_agent_name = ra_type
+            if full_agent_name.startswith("stonith:"):
+                # Maybe we can just try to get a metadata object and if it fails
+                # then we know the agent is not valid. Then the is_valid_agent
+                # method can be completely removed.
+                is_valid_agent = lib_ra.StonithAgent(
+                    utils.cmd_runner(),
+                    full_agent_name[len("stonith:"):]
+                ).is_valid_metadata()
+            else:
+                is_valid_agent = lib_ra.ResourceAgent(
+                    utils.cmd_runner(),
+                    full_agent_name
+                ).is_valid_metadata()
+            if not is_valid_agent:
+                if "--force" not in utils.pcs_options:
+                    utils.err("Unable to create resource '{0}', it is not installed on this system (use --force to override)".format(full_agent_name))
+                elif not full_agent_name.startswith("stonith:"):
+                    # stonith is covered in stonith.stonith_create
+                    if not re.match("^[^:]+(:[^:]+){1,2}$", full_agent_name):
+                        utils.err(
+                            "Invalid resource agent name '{0}'".format(
+                                full_agent_name
+                            )
+                        )
+                    print(
+                        "Warning: '{0}' is not installed or does not provide valid metadata".format(
+                            full_agent_name
+                        )
+                    )
+        else:
+            full_agent_name = lib_ra.guess_exactly_one_resource_agent_full_name(
+                utils.cmd_runner(),
+                ra_type
+            ).get_name()
+            print("Creating resource '{0}'".format(full_agent_name))
+    except lib_ra.ResourceAgentError as e:
+        utils.process_library_reports(
+            [lib_ra.resource_agent_error_to_report_item(e)]
+        )
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
+    agent_name_parts = split_resource_agent_name(full_agent_name)
+
 
-    if not utils.is_valid_resource(ra_type) and "--force" not in utils.pcs_options:
-        utils.err ("Unable to create resource '%s', it is not installed on this system (use --force to override)" % ra_type)
+    dom = utils.get_cib_dom()
 
     if utils.does_id_exist(dom, ra_id):
         utils.err("unable to create resource/fence device '%s', '%s' already exists on this system" % (ra_id,ra_id))
@@ -511,7 +447,7 @@ def resource_create(
     # the default operations we remove if from the default operations
     op_values_agent = []
     if "--no-default-ops" not in utils.pcs_options:
-        default_op_values = utils.get_default_op_values(ra_type)
+        default_op_values = utils.get_default_op_values(full_agent_name)
         for def_op in default_op_values:
             match = False
             for op in op_values:
@@ -570,32 +506,40 @@ def resource_create(
         meta_values = []
 
     instance_attributes = convert_args_to_instance_variables(ra_values,ra_id)
-    primitive_values = get_full_ra_type(ra_type)
+    primitive_values = agent_name_parts[:]
     primitive_values.insert(0,("id",ra_id))
     meta_attributes = convert_args_to_meta_attrs(meta_values, ra_id)
-    if "--force" not in utils.pcs_options and utils.does_resource_have_options(ra_type):
+    if "--force" not in utils.pcs_options:
         params = utils.convert_args_to_tuples(ra_values)
         bad_opts, missing_req_opts = [], []
         try:
-            bad_opts, missing_req_opts = lib_ra.validate_instance_attributes(
-                utils.cmd_runner(),
-                dict(params),
-                get_full_ra_type(ra_type, True)
+            if full_agent_name.startswith("stonith:"):
+                metadata = lib_ra.StonithAgent(
+                    utils.cmd_runner(),
+                    full_agent_name[len("stonith:"):]
+                )
+            else:
+                metadata = lib_ra.ResourceAgent(
+                    utils.cmd_runner(),
+                    full_agent_name
+                )
+            bad_opts, missing_req_opts = metadata.validate_parameters_values(
+                dict(params)
             )
-        except lib_ra.ResourceAgentLibError as e:
+        except lib_ra.ResourceAgentError as e:
             utils.process_library_reports(
-                [lib_ra.resource_agent_lib_error_to_report_item(e)]
+                [lib_ra.resource_agent_error_to_report_item(e)]
             )
         except LibraryError as e:
             utils.process_library_reports(e.args)
         if len(bad_opts) != 0:
             utils.err ("resource option(s): '%s', are not recognized for resource type: '%s' (use --force to override)" \
-                    % (", ".join(sorted(bad_opts)), get_full_ra_type(ra_type, True)))
+                    % (", ".join(sorted(bad_opts)), full_agent_name))
         if len(missing_req_opts) != 0:
             utils.err(
                 "missing required option(s): '%s' for resource type: %s"
                     " (use --force to override)"
-                % (", ".join(missing_req_opts), get_full_ra_type(ra_type, True))
+                % (", ".join(missing_req_opts), full_agent_name)
             )
 
     resource_elem = create_xml_element("primitive", primitive_values, instance_attributes + meta_attributes)
@@ -838,37 +782,46 @@ def resource_move(argv,clear=False,ban=False):
                 msg.append("\n" + output)
             utils.err("\n".join(msg).strip())
 
-def resource_standards(return_output=False):
-    output, dummy_retval = utils.run(["crm_resource","--list-standards"], True)
-    # Return value is ignored because it contains the number of standards
-    # returned, not an error code
-    output = output.strip()
-    if return_output == True:
-        return output
-    print(output)
-
-def resource_providers():
-    output, dummy_retval = utils.run(["crm_resource","--list-ocf-providers"],True)
-    # Return value is ignored because it contains the number of providers
-    # returned, not an error code
-    print(output.strip())
-
-def resource_agents(argv):
+
+def resource_standards(lib, argv, modifiers):
+    if argv:
+        raise CmdLineInputError()
+
+    standards = lib.resource_agent.list_standards()
+
+    if standards:
+        print("\n".join(standards))
+    else:
+        utils.err("No standards found")
+
+
+def resource_providers(lib, argv, modifiers):
+    if argv:
+        raise CmdLineInputError()
+
+    providers = lib.resource_agent.list_ocf_providers()
+
+    if providers:
+        print("\n".join(providers))
+    else:
+        utils.err("No OCF providers found")
+
+
+def resource_agents(lib, argv, modifiers):
     if len(argv) > 1:
-        usage.resource()
-        sys.exit(1)
-    elif len(argv) == 1:
-        standards = [argv[0]]
+        raise CmdLineInputError()
+
+    standard = argv[0] if argv else None
+
+    agents = lib.resource_agent.list_agents_for_standard_and_provider(standard)
+
+    if agents:
+        print("\n".join(agents))
     else:
-        output = resource_standards(True)
-        standards = output.split('\n')
+        utils.err("No agents found{0}".format(
+            " for {0}".format(argv[0]) if argv else ""
+        ))
 
-    for s in standards:
-        output, dummy_retval = utils.run(["crm_resource", "--list-agents", s])
-        preg = re.compile(r'\d+ agents found for standard.*$', re.MULTILINE)
-        output = preg.sub("", output)
-        output = output.strip()
-        print(output)
 
 # Update a resource, removing any args that are empty and adding/updating
 # args that are not empty
@@ -920,12 +873,20 @@ def resource_update(res_id,args):
             resource_type = resClass + ":" + resProvider + ":" + resType
         bad_opts = []
         try:
-            bad_opts, _ = lib_ra.validate_instance_attributes(
-                utils.cmd_runner(), dict(params), resource_type
-            )
-        except lib_ra.ResourceAgentLibError as e:
+            if resource_type.startswith("stonith:"):
+                metadata = lib_ra.StonithAgent(
+                    utils.cmd_runner(),
+                    resource_type[len("stonith:"):]
+                )
+            else:
+                metadata = lib_ra.ResourceAgent(
+                    utils.cmd_runner(),
+                    resource_type
+                )
+            bad_opts, _ = metadata.validate_parameters_values(dict(params))
+        except lib_ra.ResourceAgentError as e:
             utils.process_library_reports(
-                [lib_ra.resource_agent_lib_error_to_report_item(e)]
+                [lib_ra.resource_agent_error_to_report_item(e)]
             )
         except LibraryError as e:
             utils.process_library_reports(e.args)
@@ -1298,30 +1259,26 @@ def convert_args_to_instance_variables(ra_values, ra_id):
     ret = ("instance_attributes", [[("id"),(attribute_id)]], ivs)
     return [ret]
 
-
-# Passed a resource type (ex. ocf:heartbeat:IPaddr2 or IPaddr2) and returns
-# a list of tuples mapping the types to xml attributes
-def get_full_ra_type(ra_type, return_string = False):
-    if (ra_type.count(":") == 0):
-        if os.path.isfile("/usr/lib/ocf/resource.d/heartbeat/%s" % ra_type):
-            ra_type = "ocf:heartbeat:" + ra_type
-        elif os.path.isfile("/usr/lib/ocf/resource.d/pacemaker/%s" % ra_type):
-            ra_type = "ocf:pacemaker:" + ra_type
-        elif os.path.isfile("/usr/share/pacemaker/nagios/plugins-metadata/%s.xml" % ra_type):
-            ra_type = "nagios:" + ra_type
-        else:
-            ra_type = "ocf:heartbeat:" + ra_type
-
-
-    if return_string:
-        return ra_type
-
-    ra_def = ra_type.split(":")
-    # If len = 2 then we're creating a fence device
-    if len(ra_def) == 2:
-        return([("class",ra_def[0]),("type",ra_def[1])])
-    else:
-        return([("class",ra_def[0]),("type",ra_def[2]),("provider",ra_def[1])])
+def split_resource_agent_name(full_agent_name):
+    match = re.match(
+        "^(?P<standard>[^:]+)(:(?P<provider>[^:]+))?:(?P<type>[^:]+)$",
+        full_agent_name
+    )
+    if not match:
+        utils.err(
+            "Invalid resource agent name '{0}'".format(
+                full_agent_name
+            )
+        )
+    parts = [
+        ("class", match.group("standard")),
+        ("type", match.group("type")),
+    ]
+    if match.group("provider"):
+        parts.append(
+            ("provider", match.group("provider"))
+        )
+    return parts
 
 
 def create_xml_element(tag, options, children = []):
@@ -2839,25 +2796,20 @@ def print_resources_utilization():
 
 
 def get_resource_agent_info(argv):
+# This is used only by pcsd, will be removed in new architecture
     if len(argv) != 1:
         utils.err("One parameter expected")
 
     agent = argv[0]
-    try:
-        metadata_dom = lib_ra.get_resource_agent_metadata(
-            utils.cmd_runner(),
-            agent
-        )
-        metadata = lib_ra.get_agent_desc(metadata_dom)
-        metadata["name"] = agent
-        metadata["parameters"] = lib_ra.get_resource_agent_parameters(
-            metadata_dom
-        )
 
-        print(json.dumps(metadata))
-    except lib_ra.ResourceAgentLibError as e:
+    runner = utils.cmd_runner()
+
+    try:
+        metadata = lib_ra.ResourceAgent(runner, agent)
+        print(json.dumps(metadata.get_full_info()))
+    except lib_ra.ResourceAgentError as e:
         utils.process_library_reports(
-            [lib_ra.resource_agent_lib_error_to_report_item(e)]
+            [lib_ra.resource_agent_error_to_report_item(e)]
         )
     except LibraryError as e:
         utils.process_library_reports(e.args)
diff --git a/pcs/settings_default.py b/pcs/settings_default.py
index 84eeacc..bff96e7 100644
--- a/pcs/settings_default.py
+++ b/pcs/settings_default.py
@@ -1,5 +1,8 @@
 import os.path
 
+systemctl_binary = "/bin/systemctl"
+chkconfig_binary = "/sbin/chkconfig"
+service_binary = "/sbin/service"
 pacemaker_binaries = "/usr/sbin/"
 corosync_binaries = "/usr/sbin/"
 corosync_qnet_binaries = "/usr/bin/"
@@ -22,10 +25,11 @@ pengine_binary = "/usr/libexec/pacemaker/pengine"
 crmd_binary = "/usr/libexec/pacemaker/crmd"
 cib_binary = "/usr/libexec/pacemaker/cib"
 stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.154"
+pcs_version = "0.9.155"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
 crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
+agent_metadata_schema = "/usr/share/resource-agents/ra-api-1.dtd"
 pcsd_cert_location = "/var/lib/pcsd/pcsd.crt"
 pcsd_key_location = "/var/lib/pcsd/pcsd.key"
 pcsd_tokens_location = "/var/lib/pcsd/tokens"
@@ -35,9 +39,6 @@ pcsd_exec_location = "/usr/lib/pcsd/"
 cib_dir = "/var/lib/pacemaker/cib/"
 pacemaker_uname = "hacluster"
 pacemaker_gname = "haclient"
-ocf_root = "/usr/lib/ocf/"
-ocf_resources = os.path.join(ocf_root, "resource.d/")
-nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata/"
 sbd_watchdog_default = "/dev/watchdog"
 sbd_config = "/etc/sysconfig/sbd"
 pacemaker_wait_timeout_status = 62
diff --git a/pcs/stonith.py b/pcs/stonith.py
index 0942979..bce346d 100644
--- a/pcs/stonith.py
+++ b/pcs/stonith.py
@@ -7,9 +7,7 @@ from __future__ import (
 
 import sys
 import re
-import glob
 import json
-import os
 
 from pcs import (
     resource,
@@ -18,55 +16,55 @@ from pcs import (
 )
 from pcs.cli.common import parse_args
 from pcs.cli.common.errors import CmdLineInputError
+from pcs.cli.common.reports import build_report_message
 from pcs.lib.errors import LibraryError, ReportItemSeverity
 import pcs.lib.resource_agent as lib_ra
 
 def stonith_cmd(argv):
+    if len(argv) < 1:
+        sub_cmd, argv_next = "show", []
+    else:
+        sub_cmd, argv_next = argv[0], argv[1:]
+
     lib = utils.get_library_wrapper()
     modifiers = utils.get_modificators()
-    if len(argv) == 0:
-        argv = ["show"]
 
-    sub_cmd = argv.pop(0)
     try:
-        if (sub_cmd == "help"):
+        if sub_cmd == "help":
             usage.stonith(argv)
-        elif (sub_cmd == "list"):
-            stonith_list_available(argv)
-        elif (sub_cmd == "describe"):
-            if len(argv) == 1:
-                stonith_list_options(argv[0])
-            else:
-                raise CmdLineInputError()
-        elif (sub_cmd == "create"):
-            stonith_create(argv)
-        elif (sub_cmd == "update"):
-            if len(argv) > 1:
-                stn_id = argv.pop(0)
-                resource.resource_update(stn_id,argv)
+        elif sub_cmd == "list":
+            stonith_list_available(lib, argv_next, modifiers)
+        elif sub_cmd == "describe":
+            stonith_list_options(lib, argv_next, modifiers)
+        elif sub_cmd == "create":
+            stonith_create(argv_next)
+        elif sub_cmd == "update":
+            if len(argv_next) > 1:
+                stn_id = argv_next.pop(0)
+                resource.resource_update(stn_id, argv_next)
             else:
                 raise CmdLineInputError()
-        elif (sub_cmd == "delete"):
-            if len(argv) == 1:
-                stn_id = argv.pop(0)
+        elif sub_cmd == "delete":
+            if len(argv_next) == 1:
+                stn_id = argv_next.pop(0)
                 resource.resource_remove(stn_id)
             else:
                 raise CmdLineInputError()
-        elif (sub_cmd == "show"):
-            resource.resource_show(argv, True)
+        elif sub_cmd == "show":
+            resource.resource_show(argv_next, True)
             stonith_level([])
-        elif (sub_cmd == "level"):
-            stonith_level(argv)
-        elif (sub_cmd == "fence"):
-            stonith_fence(argv)
-        elif (sub_cmd == "cleanup"):
-            resource.resource_cleanup(argv)
-        elif (sub_cmd == "confirm"):
-            stonith_confirm(argv)
-        elif (sub_cmd == "get_fence_agent_info"):
-            get_fence_agent_info(argv)
-        elif (sub_cmd == "sbd"):
-            sbd_cmd(lib, argv, modifiers)
+        elif sub_cmd == "level":
+            stonith_level(argv_next)
+        elif sub_cmd == "fence":
+            stonith_fence(argv_next)
+        elif sub_cmd == "cleanup":
+            resource.resource_cleanup(argv_next)
+        elif sub_cmd == "confirm":
+            stonith_confirm(argv_next)
+        elif sub_cmd == "get_fence_agent_info":
+            get_fence_agent_info(argv_next)
+        elif sub_cmd == "sbd":
+            sbd_cmd(lib, argv_next, modifiers)
         else:
             raise CmdLineInputError()
     except LibraryError as e:
@@ -74,71 +72,45 @@ def stonith_cmd(argv):
     except CmdLineInputError as e:
         utils.exit_on_cmdline_input_errror(e, "stonith", sub_cmd)
 
-def stonith_list_available(argv):
-    if len(argv) != 0:
-        filter_string = argv[0]
-    else:
-        filter_string = ""
-
-    bad_fence_devices = ["kdump_send", "legacy", "na", "nss_wrapper",
-            "pcmk", "vmware_helper", "ack_manual", "virtd", "sanlockd",
-            "check", "tool", "node"]
-    fence_devices = sorted(glob.glob(utils.fence_bin + "fence_*"))
-    for bfd in bad_fence_devices:
-        try:
-            fence_devices.remove(utils.fence_bin + "fence_"+bfd)
-        except ValueError:
-            continue
 
-    if not fence_devices:
+def stonith_list_available(lib, argv, modifiers):
+    if len(argv) > 1:
+        raise CmdLineInputError()
+
+    search = argv[0] if argv else None
+    agent_list = lib.stonith_agent.list_agents(modifiers["describe"], search)
+
+    if not agent_list:
+        if search:
+            utils.err("No stonith agents matching the filter.")
         utils.err(
-            "No stonith agents available. Do you have fence agents installed?"
+            "No stonith agents available. "
+            "Do you have fence agents installed?"
         )
-    fence_devices_filtered = [fd for fd in fence_devices if filter_string in fd]
-    if not fence_devices_filtered:
-        utils.err("No stonith agents matching the filter.")
-
-    for fd in fence_devices_filtered:
-        sd = ""
-        agent_name = os.path.basename(fd)
-        if "--nodesc" not in utils.pcs_options:
-            try:
-                metadata = lib_ra.get_fence_agent_metadata(
-                    utils.cmd_runner(), agent_name
-                )
-                shortdesc = lib_ra.get_agent_desc(metadata)["shortdesc"]
-                if shortdesc:
-                    sd = " - " + resource.format_desc(
-                        len(agent_name) + 3, shortdesc
-                    )
-            except lib_ra.ResourceAgentLibError as e:
-                utils.process_library_reports([
-                    lib_ra.resource_agent_lib_error_to_report_item(
-                        e, ReportItemSeverity.WARNING
-                    )
-                ])
-            except LibraryError as e:
-                utils.err(
-                    e.args[-1].message, False
+
+    for agent_info in agent_list:
+        name = agent_info["name"]
+        shortdesc = agent_info["shortdesc"]
+        if shortdesc:
+            print("{0} - {1}".format(
+                name,
+                resource._format_desc(
+                    len(name + " - "), shortdesc.replace("\n", " ")
                 )
-                continue
-        print(agent_name + sd)
+            ))
+        else:
+            print(name)
 
-def stonith_list_options(stonith_agent):
-    runner = utils.cmd_runner()
-    try:
-        metadata = lib_ra.get_fence_agent_metadata(runner, stonith_agent)
-        desc = lib_ra.get_agent_desc(metadata)
-        params = lib_ra.get_fence_agent_parameters(runner, metadata)
-        # Fence agents just list the actions, usually without any attributes.
-        # We could print them but it wouldn't add any usefull information.
-        resource.resource_print_options(stonith_agent, desc, params, actions=[])
-    except lib_ra.ResourceAgentLibError as e:
-        utils.process_library_reports(
-            [lib_ra.resource_agent_lib_error_to_report_item(e)]
-        )
-    except LibraryError as e:
-        utils.process_library_reports(e.args)
+
+def stonith_list_options(lib, argv, modifiers):
+    if len(argv) != 1:
+        raise CmdLineInputError()
+    agent_name = argv[0]
+
+    print(resource._format_agent_description(
+        lib.stonith_agent.describe_agent(agent_name),
+        True
+    ))
 
 
 def stonith_create(argv):
@@ -153,22 +125,23 @@ def stonith_create(argv):
     )
 
     try:
-        metadata = lib_ra.get_fence_agent_metadata(
-            utils.cmd_runner(), stonith_type
+        metadata = lib_ra.StonithAgent(
+            utils.cmd_runner(),
+            stonith_type
         )
-        if stonith_does_agent_provide_unfencing(metadata):
+        if metadata.get_provides_unfencing():
             meta_values = [
                 meta for meta in meta_values if not meta.startswith("provides=")
             ]
             meta_values.append("provides=unfencing")
-    except lib_ra.ResourceAgentLibError as e:
+    except lib_ra.ResourceAgentError as e:
         forced = utils.get_modificators().get("force", False)
         if forced:
             severity = ReportItemSeverity.WARNING
         else:
             severity = ReportItemSeverity.ERROR
         utils.process_library_reports([
-            lib_ra.resource_agent_lib_error_to_report_item(
+            lib_ra.resource_agent_error_to_report_item(
                 e, severity, not forced
             )
         ])
@@ -410,20 +383,9 @@ def stonith_confirm(argv, skip_question=False):
     else:
         print("Node: %s confirmed fenced" % node)
 
-def stonith_does_agent_provide_unfencing(metadata_dom):
-    for action in lib_ra.get_agent_actions(metadata_dom):
-        if (
-            action["name"] == "on" and
-            "on_target" in action and
-            action["on_target"] == "1" and
-            "automatic" in action and
-            action["automatic"] == "1"
-        ):
-            return True
-    return False
-
 
 def get_fence_agent_info(argv):
+# This is used only by pcsd, will be removed in new architecture
     if len(argv) != 1:
         utils.err("One parameter expected")
 
@@ -434,20 +396,13 @@ def get_fence_agent_info(argv):
     runner = utils.cmd_runner()
 
     try:
-        metadata_dom = lib_ra.get_fence_agent_metadata(
-            runner,
-            agent.split("stonith:", 1)[1]
-        )
-        metadata = lib_ra.get_agent_desc(metadata_dom)
-        metadata["name"] = agent
-        metadata["parameters"] = lib_ra.get_fence_agent_parameters(
-            runner, metadata_dom
-        )
-
-        print(json.dumps(metadata))
-    except lib_ra.ResourceAgentLibError as e:
+        metadata = lib_ra.StonithAgent(runner, agent[len("stonith:"):])
+        info = metadata.get_full_info()
+        info["name"] = "stonith:{0}".format(info["name"])
+        print(json.dumps(info))
+    except lib_ra.ResourceAgentError as e:
         utils.process_library_reports(
-            [lib_ra.resource_agent_lib_error_to_report_item(e)]
+            [lib_ra.resource_agent_error_to_report_item(e)]
         )
     except LibraryError as e:
         utils.process_library_reports(e.args)
diff --git a/pcs/test/suite.py b/pcs/test/suite.py
index b6c7be2..fdab448 100755
--- a/pcs/test/suite.py
+++ b/pcs/test/suite.py
@@ -1,5 +1,4 @@
 #!/usr/bin/env python
-
 from __future__ import (
     absolute_import,
     division,
@@ -57,39 +56,48 @@ def discover_tests(explicitly_enumerated_tests, exclude_enumerated_tests=False):
         ])
     return unittest.TestLoader().loadTestsFromNames(explicitly_enumerated_tests)
 
-def run_tests(tests, verbose=False, color=False):
-    resultclass = unittest.TextTestResult
-    if color:
-        from pcs.test.tools.color_text_runner import ColorTextTestResult
-        resultclass = ColorTextTestResult
-
-    testRunner = unittest.TextTestRunner(
-        verbosity=2 if verbose else 1,
-        resultclass=resultclass
-    )
-    return testRunner.run(tests)
 
 explicitly_enumerated_tests = [
     prepare_test_name(arg) for arg in sys.argv[1:] if arg not in (
         "-v",
-        "--color",
-        "--no-color",
+        "--vanilla",
+        "--no-color", #deprecated, use --vanilla instead
         "--all-but",
+        "--last-slash",
+        "--traditional-verbose",
+        "--traceback-highlight",
     )
 ]
-test_result = run_tests(
-    discover_tests(explicitly_enumerated_tests, "--all-but" in sys.argv),
-    verbose="-v" in sys.argv,
-    color=(
-        "--color" in sys.argv
-        or
-        (
-            sys.stdout.isatty()
-            and
-            sys.stderr.isatty()
-            and "--no-color" not in sys.argv
-        )
-    ),
+
+if "--no-color" in sys.argv:
+    print("DEPRECATED: --no-color is deprecated, use --vanilla instead")
+
+use_improved_result_class = (
+    sys.stdout.isatty()
+    and
+    sys.stderr.isatty()
+    and (
+        "--vanilla" not in sys.argv
+        and
+        "--no-color" not in sys.argv #deprecated, use --vanilla instead
+    )
+)
+
+resultclass = unittest.TextTestResult
+if use_improved_result_class:
+    from pcs.test.tools.color_text_runner import get_text_test_result_class
+    resultclass = get_text_test_result_class(
+        slash_last_fail_in_overview=("--last-slash" in sys.argv),
+        traditional_verbose=("--traditional-verbose" in sys.argv),
+        traceback_highlight=("--traceback-highlight" in sys.argv),
+    )
+
+testRunner = unittest.TextTestRunner(
+    verbosity=2 if "-v" in sys.argv else 1,
+    resultclass=resultclass
+)
+test_result =  testRunner.run(
+    discover_tests(explicitly_enumerated_tests, "--all-but" in sys.argv)
 )
 if not test_result.wasSuccessful():
     sys.exit(1)
@@ -110,5 +118,5 @@ if not test_result.wasSuccessful():
 # run all test except some:
 # pcs/test/suite.py pcs.test_acl.ACLTest --all-but
 #
-# for colored test report
-# pcs/test/suite.py --color
+# for remove extra features even if sys.stdout is attached to terminal
+# pcs/test/suite.py --vanilla
diff --git a/pcs/test/test_acl.py b/pcs/test/test_acl.py
index 186c035..9f9f878 100644
--- a/pcs/test/test_acl.py
+++ b/pcs/test/test_acl.py
@@ -43,7 +43,7 @@ class ACLTest(unittest.TestCase, AssertPcsMixin):
 
         self.assert_pcs_success(
             'acl role create test_role read xpath my_xpath',
-            "Cluster CIB has been upgraded to latest version\n"
+            "CIB has been upgraded to the latest schema version.\n"
         )
 
         with open(temp_cib) as myfile:
@@ -90,19 +90,19 @@ class ACLTest(unittest.TestCase, AssertPcsMixin):
         ac(o,"")
 
         o, r = pcs("acl user create user1 roleX")
-        ac(o, "Error: cannot find acl role: roleX\n")
+        ac(o, "Error: role 'roleX' does not exist\n")
         self.assertEqual(1, r)
 
         o, r = pcs("acl user create user1 role1 roleX")
-        ac(o, "Error: cannot find acl role: roleX\n")
+        ac(o, "Error: role 'roleX' does not exist\n")
         self.assertEqual(1, r)
 
         o, r = pcs("acl group create group1 roleX")
-        ac(o, "Error: cannot find acl role: roleX\n")
+        ac(o, "Error: role 'roleX' does not exist\n")
         self.assertEqual(1, r)
 
         o, r = pcs("acl group create group1 role1 roleX")
-        ac(o, "Error: cannot find acl role: roleX\n")
+        ac(o, "Error: role 'roleX' does not exist\n")
         self.assertEqual(1, r)
 
         o, r = pcs("acl")
@@ -131,11 +131,26 @@ Role: role3
 
         o,r = pcs("acl")
         assert r == 0
-        ac(o,"ACLs are disabled, run 'pcs acl enable' to enable\n\nUser: user1\n  Roles: role1 role2\nGroup: group1\n  Roles: role1 role3\nRole: role1\n  Permission: read xpath /xpath1/ (role1-read)\n  Permission: write xpath /xpath2/ (role1-write)\nRole: role2\n  Permission: deny xpath /xpath3/ (role2-deny)\n  Permission: deny xpath /xpath4/ (role2-deny-1)\nRole: role3\n  Permission: read xpath /xpath5/ (role3-read)\n  Permission: read xpath /xpath6/ (role3-read-1)\n")
+        ac(
+            o,
+            """\
+ACLs are disabled, run 'pcs acl enable' to enable
 
-        o,r = pcs("acl role create user1")
-        assert r == 1
-        ac(o,"Error: 'user1' already exists\n")
+User: user1
+  Roles: role1 role2
+Group: group1
+  Roles: role1 role3
+Role: role1
+  Permission: read xpath /xpath1/ (role1-read)
+  Permission: write xpath /xpath2/ (role1-write)
+Role: role2
+  Permission: deny xpath /xpath3/ (role2-deny)
+  Permission: deny xpath /xpath4/ (role2-deny-1)
+Role: role3
+  Permission: read xpath /xpath5/ (role3-read)
+  Permission: read xpath /xpath6/ (role3-read-1)
+"""
+        )
 
         o,r = pcs("acl role create group1")
         assert r == 1
@@ -147,35 +162,23 @@ Role: role3
 
         o,r = pcs("acl user create user1")
         assert r == 1
-        ac(o,"Error: user user1 already exists\n")
-
-        o,r = pcs("acl user create group1")
-        assert r == 1
-        ac(o,"Error: group1 already exists\n")
-
-        o,r = pcs("acl user create role1")
-        assert r == 1
-        ac(o,"Error: role1 already exists\n")
-
-        o,r = pcs("acl group create user1")
-        assert r == 1
-        ac(o,"Error: user1 already exists\n")
+        ac(o,"Error: 'user1' already exists\n")
 
         o,r = pcs("acl group create group1")
         assert r == 1
-        ac(o,"Error: group group1 already exists\n")
+        ac(o,"Error: 'group1' already exists\n")
 
         o,r = pcs("acl group create role1")
         assert r == 1
-        ac(o,"Error: role1 already exists\n")
+        ac(o,"Error: 'role1' already exists\n")
 
         o,r = pcs("acl role assign role1 to noexist")
         assert r == 1
-        ac(o,"Error: cannot find user or group: noexist\n")
+        ac(o,"Error: user/group 'noexist' does not exist\n")
 
         o,r = pcs("acl role assign noexist to user1")
         assert r == 1
-        ac(o,"Error: cannot find role: noexist\n")
+        ac(o,"Error: role 'noexist' does not exist\n")
 
         o,r = pcs("acl role assign role3 to user1")
         assert r == 0
@@ -187,11 +190,11 @@ Role: role3
 
         o,r = pcs("acl role unassign noexist from user1")
         assert r == 1
-        ac(o,"Error: cannot find role: noexist, assigned to user/group: user1\n")
+        ac(o,"Error: Role 'noexist' is not assigned to 'user1'\n")
 
         o,r = pcs("acl role unassign role3 from noexist")
         assert r == 1
-        ac(o,"Error: cannot find user or group: noexist\n")
+        ac(o,"Error: user/group 'noexist' does not exist\n")
 
         o,r = pcs("acl role unassign role3 from user1")
         assert r == 0
@@ -371,7 +374,7 @@ Role: role2
 
         o,r = pcs("acl user create user1")
         assert r == 1
-        ac(o,"Error: user user1 already exists\n")
+        ac(o,"Error: 'user1' already exists\n")
 
         o,r = pcs("acl group create group1")
         ac(o,"")
@@ -383,7 +386,7 @@ Role: role2
 
         o,r = pcs("acl group create group1")
         assert r == 1
-        ac(o,"Error: group group1 already exists\n")
+        ac(o,"Error: 'group1' already exists\n")
 
         o,r = pcs("acl")
         ac(o,"""\
@@ -402,7 +405,7 @@ Group: group2
 
         o,r = pcs("acl group delete user1")
         assert r == 1
-        ac(o,"Error: unable to find acl group: user1\n")
+        ac(o,"Error: group 'user1' does not exist\n")
 
         o,r = pcs("acl")
         ac(o, """\
@@ -591,7 +594,7 @@ User: user2
 
         o,r = pcs("acl role delete role2")
         assert r == 1
-        ac(o,"Error: unable to find acl role: role2\n")
+        ac(o,"Error: role 'role2' does not exist\n")
 
         o,r = pcs("acl role delete role1")
         assert r == 0
@@ -647,7 +650,7 @@ User: user2
         assert r == 0
 
         o,r = pcs("acl permission delete role4-deny")
-        ac(o,"Error: Unable to find permission with id: role4-deny\n")
+        ac(o,"Error: permission 'role4-deny' does not exist\n")
         assert r == 1
 
         o,r = pcs("acl show")
@@ -812,3 +815,76 @@ Role: role4
             'acl role unknown whatever',
             stdout_start="\nUsage: pcs acl role..."
         )
+
+    def test_assign_unassign_role_to_user(self):
+        self.assert_pcs_success("acl role create role1")
+        self.assert_pcs_success("acl user create user1")
+        self.assert_pcs_success("acl role assign role1 user user1")
+        self.assert_pcs_fail(
+            "acl role assign role1 user user1",
+            "Error: Role 'role1' is already asigned to 'user1'\n"
+        )
+        self.assert_pcs_success("acl role unassign role1 user user1")
+        self.assert_pcs_fail(
+            "acl role unassign role1 user user1",
+            "Error: Role 'role1' is not assigned to 'user1'\n"
+        )
+
+    def test_assign_unassign_role_to_user_not_existing_user(self):
+        self.assert_pcs_success("acl role create role1")
+        self.assert_pcs_success("acl group create group1")
+        self.assert_pcs_fail(
+            "acl role assign role1 to user group1",
+            "Error: user 'group1' does not exist\n"
+        )
+
+    def test_assign_unassign_role_to_user_with_to(self):
+        self.assert_pcs_success("acl role create role1")
+        self.assert_pcs_success("acl user create user1")
+        self.assert_pcs_success("acl role assign role1 to user user1")
+        self.assert_pcs_fail(
+            "acl role assign role1 to user user1",
+            "Error: Role 'role1' is already asigned to 'user1'\n"
+        )
+        self.assert_pcs_success("acl role unassign role1 from user user1")
+        self.assert_pcs_fail(
+            "acl role unassign role1 from user user1",
+            "Error: Role 'role1' is not assigned to 'user1'\n"
+        )
+
+    def test_assign_unassign_role_to_group(self):
+        self.assert_pcs_success("acl role create role1")
+        self.assert_pcs_success("acl group create group1")
+        self.assert_pcs_success("acl role assign role1 group group1")
+        self.assert_pcs_fail(
+            "acl role assign role1 group group1",
+            "Error: Role 'role1' is already asigned to 'group1'\n"
+        )
+        self.assert_pcs_success("acl role unassign role1 group group1")
+        self.assert_pcs_fail(
+            "acl role unassign role1 group group1",
+            "Error: Role 'role1' is not assigned to 'group1'\n"
+        )
+
+    def test_assign_unassign_role_to_group_not_existing_group(self):
+        self.assert_pcs_success("acl role create role1")
+        self.assert_pcs_success("acl user create user1")
+        self.assert_pcs_fail(
+            "acl role assign role1 to group user1",
+            "Error: group 'user1' does not exist\n"
+        )
+
+    def test_assign_unassign_role_to_group_with_to(self):
+        self.assert_pcs_success("acl role create role1")
+        self.assert_pcs_success("acl group create group1")
+        self.assert_pcs_success("acl role assign role1 to group group1")
+        self.assert_pcs_fail(
+            "acl role assign role1 to group group1",
+            "Error: Role 'role1' is already asigned to 'group1'\n"
+        )
+        self.assert_pcs_success("acl role unassign role1 from group group1")
+        self.assert_pcs_fail(
+            "acl role unassign role1 from group group1",
+            "Error: Role 'role1' is not assigned to 'group1'\n"
+        )
+
diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py
index d919ff6..ccb53d7 100644
--- a/pcs/test/test_alert.py
+++ b/pcs/test/test_alert.py
@@ -11,6 +11,7 @@ import shutil
 from pcs.test.tools.misc import (
     get_test_resource as rc,
     is_minimum_pacemaker_version,
+    outdent,
 )
 from pcs.test.tools.assertions import AssertPcsMixin
 from pcs.test.tools.pcs_runner import PcsRunner
@@ -202,24 +203,61 @@ class RemoveAlertTest(PcsAlertTest):
             "alert remove alert1", "Error: Alert 'alert1' not found.\n"
         )
 
-    def test_success(self):
+    def test_one(self):
         self.assert_pcs_success(
-            "alert config",
-            """\
-Alerts:
- No alerts defined
-"""
+            "alert config", outdent("""\
+                Alerts:
+                 No alerts defined
+                """
+            )
         )
 
-        self.assert_pcs_success("alert create path=test")
+        self.assert_pcs_success("alert create path=test id=alert1")
         self.assert_pcs_success(
-            "alert config",
-            """\
-Alerts:
- Alert: alert (path=test)
-"""
+            "alert config", outdent("""\
+                Alerts:
+                 Alert: alert1 (path=test)
+                """
+            )
+        )
+        self.assert_pcs_success("alert remove alert1")
+        self.assert_pcs_success(
+            "alert config", outdent("""\
+                Alerts:
+                 No alerts defined
+                """
+            )
+        )
+
+    def test_multiple(self):
+        self.assert_pcs_success(
+            "alert config", outdent("""\
+                Alerts:
+                 No alerts defined
+                """
+            )
+        )
+
+        self.assert_pcs_success("alert create path=test id=alert1")
+        self.assert_pcs_success("alert create path=test id=alert2")
+        self.assert_pcs_success("alert create path=test id=alert3")
+        self.assert_pcs_success(
+            "alert config", outdent("""\
+                Alerts:
+                 Alert: alert1 (path=test)
+                 Alert: alert2 (path=test)
+                 Alert: alert3 (path=test)
+                """
+            )
+        )
+        self.assert_pcs_success("alert remove alert1 alert3")
+        self.assert_pcs_success(
+            "alert config", outdent("""\
+                Alerts:
+                 Alert: alert2 (path=test)
+                """
+            )
         )
-        self.assert_pcs_success("alert remove alert")
 
 
 @unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
@@ -448,31 +486,88 @@ Alerts:
 
 @unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
 class RemoveRecipientTest(PcsAlertTest):
-    def test_success(self):
+    def test_one(self):
         self.assert_pcs_success("alert create path=test")
         self.assert_pcs_success(
             "alert recipient add alert value=rec_value id=rec"
         )
         self.assert_pcs_success(
-            "alert config",
-            """\
-Alerts:
- Alert: alert (path=test)
-  Recipients:
-   Recipient: rec (value=rec_value)
-"""
+            "alert config", outdent("""\
+                Alerts:
+                 Alert: alert (path=test)
+                  Recipients:
+                   Recipient: rec (value=rec_value)
+                """
+            )
         )
         self.assert_pcs_success("alert recipient remove rec")
         self.assert_pcs_success(
-            "alert config",
-            """\
-Alerts:
- Alert: alert (path=test)
-"""
+            "alert config", outdent("""\
+                Alerts:
+                 Alert: alert (path=test)
+                """
+            )
+        )
+
+    def test_multiple(self):
+        self.assert_pcs_success("alert create path=test id=alert1")
+        self.assert_pcs_success("alert create path=test id=alert2")
+        self.assert_pcs_success(
+            "alert recipient add alert1 value=rec_value1 id=rec1"
+        )
+        self.assert_pcs_success(
+            "alert recipient add alert1 value=rec_value2 id=rec2"
+        )
+        self.assert_pcs_success(
+            "alert recipient add alert2 value=rec_value3 id=rec3"
+        )
+        self.assert_pcs_success(
+            "alert recipient add alert2 value=rec_value4 id=rec4"
+        )
+        self.assert_pcs_success(
+            "alert config", outdent("""\
+                Alerts:
+                 Alert: alert1 (path=test)
+                  Recipients:
+                   Recipient: rec1 (value=rec_value1)
+                   Recipient: rec2 (value=rec_value2)
+                 Alert: alert2 (path=test)
+                  Recipients:
+                   Recipient: rec3 (value=rec_value3)
+                   Recipient: rec4 (value=rec_value4)
+                """
+            )
+        )
+        self.assert_pcs_success("alert recipient remove rec1 rec2 rec4")
+        self.assert_pcs_success(
+            "alert config", outdent("""\
+                Alerts:
+                 Alert: alert1 (path=test)
+                 Alert: alert2 (path=test)
+                  Recipients:
+                   Recipient: rec3 (value=rec_value3)
+                """
+            )
         )
 
     def test_no_recipient(self):
+        self.assert_pcs_success("alert create path=test id=alert1")
+        self.assert_pcs_success(
+            "alert recipient add alert1 value=rec_value1 id=rec1"
+        )
         self.assert_pcs_fail(
-            "alert recipient remove rec",
-            "Error: Recipient 'rec' does not exist\n"
+            "alert recipient remove rec1 rec2 rec3", outdent("""\
+                Error: Recipient 'rec2' does not exist
+                Error: Recipient 'rec3' does not exist
+                """
+            )
+        )
+        self.assert_pcs_success(
+            "alert config", outdent("""\
+                Alerts:
+                 Alert: alert1 (path=test)
+                  Recipients:
+                   Recipient: rec1 (value=rec_value1)
+                """
+            )
         )
diff --git a/pcs/test/test_booth.py b/pcs/test/test_booth.py
index c12391b..2a445e1 100644
--- a/pcs/test/test_booth.py
+++ b/pcs/test/test_booth.py
@@ -12,7 +12,6 @@ from pcs.test.tools import pcs_unittest as unittest
 from pcs.test.tools.assertions import AssertPcsMixin, console_report
 from pcs.test.tools.misc import get_test_resource as rc
 from pcs.test.tools.pcs_runner import PcsRunner
-from pcs import settings
 
 
 EMPTY_CIB = rc("cib-empty.xml")
@@ -21,8 +20,8 @@ TEMP_CIB = rc("temp-cib.xml")
 BOOTH_CONFIG_FILE = rc("temp-booth.cfg")
 BOOTH_KEY_FILE = rc("temp-booth.key")
 
-BOOTH_RESOURCE_AGENT_INSTALLED = "booth-site" in os.listdir(
-    os.path.join(settings.ocf_resources, "pacemaker")
+BOOTH_RESOURCE_AGENT_INSTALLED = os.path.exists(
+    "/usr/lib/ocf/resource.d/pacemaker/booth-site"
 )
 need_booth_resource_agent = unittest.skipUnless(
     BOOTH_RESOURCE_AGENT_INSTALLED,
diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
index 597e0e4..f442f34 100644
--- a/pcs/test/test_cluster.py
+++ b/pcs/test/test_cluster.py
@@ -7,6 +7,7 @@ from __future__ import (
 
 import os
 import shutil
+import socket
 from pcs.test.tools import pcs_unittest as unittest
 
 from pcs.test.tools.assertions import AssertPcsMixin
@@ -28,6 +29,18 @@ cluster_conf_file = rc("cluster.conf")
 cluster_conf_tmp = rc("cluster.conf.tmp")
 corosync_conf_tmp = rc("corosync.conf.tmp")
 
+try:
+    s1 = socket.gethostbyname("rh7-1.localhost")
+    s2 = socket.gethostbyname("rh7-2.localhost")
+    TEST_NODES_RESOLVED = True
+except socket.gaierror:
+    TEST_NODES_RESOLVED = False
+
+need_to_resolve_test_nodes = unittest.skipUnless(
+    TEST_NODES_RESOLVED,
+    "unable to resolve all hostnames: rh7-1.localhost, rh7-2.localhost"
+)
+
 class ClusterTest(unittest.TestCase, AssertPcsMixin):
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
@@ -50,10 +63,16 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin):
         assert returnVal == 0
 
     def testRemoteNode(self):
-        o,r = pcs(temp_cib, "resource create D1 Dummy --no-default-ops")
+        o,r = pcs(
+            temp_cib,
+            "resource create D1 ocf:heartbeat:Dummy --no-default-ops"
+        )
         assert r==0 and o==""
 
-        o,r = pcs(temp_cib, "resource create D2 Dummy --no-default-ops")
+        o,r = pcs(
+            temp_cib,
+            "resource create D2 ocf:heartbeat:Dummy --no-default-ops"
+        )
         assert r==0 and o==""
 
         o,r = pcs(temp_cib, "cluster remote-node rh7-2 D1")
@@ -113,6 +132,7 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin):
         )
         self.assertEqual(1, returnVal)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_hostnames_resolving(self):
         output, returnVal = pcs(
             temp_cib,
@@ -237,6 +257,7 @@ logging {
 }
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_file_exists_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -823,6 +844,7 @@ logging {
 }
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_2_nodes_rhel6(self):
         # Setup a 2 node cluster and make sure the two node config is set, then
         # add a node and make sure that it's unset, then remove a node and make
@@ -1079,6 +1101,7 @@ logging {
 </cluster>
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_3_nodes_rhel6(self):
         # Setup a 3 node cluster
         if not utils.is_rhel6():
@@ -1130,6 +1153,7 @@ logging {
 </cluster>
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_transport_rhel6(self):
         # Test to make transport is set
         if not utils.is_rhel6():
@@ -1222,6 +1246,7 @@ logging {
 }
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_ipv6_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -1807,6 +1832,7 @@ logging {
 }
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_rrp_passive_udp_addr01_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -1875,6 +1901,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 </cluster>
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_rrp_passive_udp_addr01_mcast01_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -1925,6 +1952,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 </cluster>
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_rrp_passive_udp_addr01_mcastport01_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -1975,6 +2003,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 </cluster>
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_rrp_passive_udp_addr01_ttl01_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -2025,6 +2054,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 </cluster>
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_rrp_active_udp_addr01_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -2088,6 +2118,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 </cluster>
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_rrp_active_udp_broadcast_addr01_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -2148,6 +2179,7 @@ Warning: using a RRP mode of 'active' is not supported or tested
 </cluster>
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_rrp_udpu_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -2223,6 +2255,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 </cluster>
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_broadcast_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -2288,6 +2321,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
             data = f.read()
             ac(data, cluster_conf)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_quorum_options_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -2386,6 +2420,7 @@ logging {
 }
 """)
 
+    @need_to_resolve_test_nodes
     def test_cluster_setup_totem_options_rhel6(self):
         if not utils.is_rhel6():
             return
@@ -2625,6 +2660,7 @@ logging {
 }
 """)
 
+    @need_to_resolve_test_nodes
     def test_can_not_setup_cluster_for_unknown_transport_type_rhel6(self):
         if not utils.is_rhel6():
             return
diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
index fee7093..5a14e59 100644
--- a/pcs/test/test_constraints.py
+++ b/pcs/test/test_constraints.py
@@ -30,27 +30,27 @@ class ConstraintTest(unittest.TestCase):
 
     # Setups up a cluster with Resources, groups, master/slave resource and clones
     def setupClusterA(self,temp_cib):
-        line = "resource create D1 Dummy"
+        line = "resource create D1 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
-        line = "resource create D2 Dummy"
+        line = "resource create D2 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
-        line = "resource create D3 Dummy"
+        line = "resource create D3 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
-        line = "resource create D4 Dummy"
+        line = "resource create D4 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
-        line = "resource create D5 Dummy"
+        line = "resource create D5 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
-        line = "resource create D6 Dummy"
+        line = "resource create D6 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
@@ -71,7 +71,10 @@ class ConstraintTest(unittest.TestCase):
         assert returnVal == 0
         assert output == "", [output]
 
-        o, r = pcs(temp_cib, "resource create C1 Dummy --group C1-group")
+        o, r = pcs(
+            temp_cib,
+            "resource create C1 ocf:heartbeat:Dummy --group C1-group"
+        )
         assert r == 0 and o == "", o
 
         output, returnVal = pcs(temp_cib, "constraint location C1-group rule score=pingd defined pingd")
@@ -315,43 +318,43 @@ Ticket Constraints:
         assert returnVal == 0
 
     def testColocationConstraints(self):
-        line = "resource create M1 Dummy --master"
+        line = "resource create M1 ocf:heartbeat:Dummy --master"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
-        line = "resource create M2 Dummy --master"
+        line = "resource create M2 ocf:heartbeat:Dummy --master"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
-        line = "resource create M3 Dummy --master"
+        line = "resource create M3 ocf:heartbeat:Dummy --master"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == "",[returnVal, output]
 
-        line = "resource create M4 Dummy --master"
+        line = "resource create M4 ocf:heartbeat:Dummy --master"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == "",[returnVal, output]
 
-        line = "resource create M5 Dummy --master"
+        line = "resource create M5 ocf:heartbeat:Dummy --master"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == "",[returnVal, output]
 
-        line = "resource create M6 Dummy --master"
+        line = "resource create M6 ocf:heartbeat:Dummy --master"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == "",[returnVal, output]
 
-        line = "resource create M7 Dummy --master"
+        line = "resource create M7 ocf:heartbeat:Dummy --master"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == "",[returnVal, output]
 
-        line = "resource create M8 Dummy --master"
+        line = "resource create M8 ocf:heartbeat:Dummy --master"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == "",[returnVal, output]
 
-        line = "resource create M9 Dummy --master"
+        line = "resource create M9 ocf:heartbeat:Dummy --master"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == "",[returnVal, output]
 
-        line = "resource create M10 Dummy --master"
+        line = "resource create M10 ocf:heartbeat:Dummy --master"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
@@ -387,15 +390,15 @@ Ticket Constraints:
         ac(o,'Location Constraints:\nOrdering Constraints:\nColocation Constraints:\n  D1 with D3-clone (score:INFINITY)\n  D1 with D2 (score:100)\n  D1 with D2 (score:-100)\n  Master with D5 (score:100)\n  M1-master with M2-master (score:INFINITY) (rsc-role:Master) (with-rsc-role:Master)\n  M3-master with M4-master (score:INFINITY)\n  M5-master with M6-master (score:500) (rsc-role:Slave) (with-rsc-role:Started)\n  M7-master with M8-master (score:INFINITY) (rsc-role:Started) (with-rsc-ro [...]
 
     def testColocationSets(self):
-        line = "resource create D7 Dummy"
+        line = "resource create D7 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
-        line = "resource create D8 Dummy"
+        line = "resource create D8 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
-        line = "resource create D9 Dummy"
+        line = "resource create D9 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
@@ -507,11 +510,11 @@ Colocation Constraints:
             print("WARNING: Pacemaker version is too old (must be >= 1.1.12) to test resource-discovery")
             return
 
-        o,r = pcs("resource create crd Dummy")
+        o,r = pcs("resource create crd ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource create crd1 Dummy")
+        o,r = pcs("resource create crd1 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
 
@@ -570,31 +573,31 @@ Colocation Constraints:
         assert r == 1
 
     def testOrderSetsRemoval(self):
-        o,r = pcs("resource create T0 Dummy")
+        o,r = pcs("resource create T0 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
-        o,r = pcs("resource create T1 Dummy")
+        o,r = pcs("resource create T1 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
-        o,r = pcs("resource create T2 Dummy")
+        o,r = pcs("resource create T2 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
-        o,r = pcs("resource create T3 Dummy")
+        o,r = pcs("resource create T3 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
-        o,r = pcs("resource create T4 Dummy")
+        o,r = pcs("resource create T4 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
-        o,r = pcs("resource create T5 Dummy")
+        o,r = pcs("resource create T5 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
-        o,r = pcs("resource create T6 Dummy")
+        o,r = pcs("resource create T6 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
-        o,r = pcs("resource create T7 Dummy")
+        o,r = pcs("resource create T7 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
-        o,r = pcs("resource create T8 Dummy")
+        o,r = pcs("resource create T8 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
         o,r = pcs("constraint order set T0 T1 T2")
@@ -641,15 +644,15 @@ Colocation Constraints:
         assert r == 0
 
     def testOrderSets(self):
-        line = "resource create D7 Dummy"
+        line = "resource create D7 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
-        line = "resource create D8 Dummy"
+        line = "resource create D8 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
-        line = "resource create D9 Dummy"
+        line = "resource create D9 ocf:heartbeat:Dummy"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
@@ -875,7 +878,7 @@ Ticket Constraints:
         assert returnVal == 1
 
     def testLocationBadRules(self):
-        o,r = pcs("resource create stateful0 Dummy --master")
+        o,r = pcs("resource create stateful0 ocf:heartbeat:Dummy --master")
         ac(o,"")
         assert r == 0
 
@@ -896,7 +899,7 @@ Ticket Constraints:
 """)
         assert r == 0
 
-        o,r = pcs("resource create stateful1 Dummy --master")
+        o,r = pcs("resource create stateful1 ocf:heartbeat:Dummy --master")
         ac(o,"")
         assert r == 0
 
@@ -931,7 +934,7 @@ Ticket Constraints:
     def testMasterSlaveConstraint(self):
         os.system("CIB_file="+temp_cib+" cibadmin -R --scope nodes --xml-text '<nodes><node id=\"1\" uname=\"rh7-1\"/><node id=\"2\" uname=\"rh7-2\"/></nodes>'")
 
-        o,r = pcs("resource create dummy1 dummy")
+        o,r = pcs("resource create dummy1 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
 
@@ -1051,7 +1054,7 @@ Ticket Constraints:
         assert r == 0
 
     def testMasterSlaveConstraintAutocorrect(self):
-        output, returnVal = pcs("resource create dummy1 dummy")
+        output, returnVal = pcs("resource create dummy1 ocf:heartbeat:Dummy")
         ac(output, "")
         self.assertEqual(0, returnVal)
 
@@ -1304,15 +1307,15 @@ Ticket Constraints:
     def testCloneConstraint(self):
         os.system("CIB_file="+temp_cib+" cibadmin -R --scope nodes --xml-text '<nodes><node id=\"1\" uname=\"rh7-1\"/><node id=\"2\" uname=\"rh7-2\"/></nodes>'")
 
-        o,r = pcs("resource create dummy1 dummy")
+        o,r = pcs("resource create dummy1 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource create dummy Dummy --clone")
+        o,r = pcs("resource create dummy ocf:heartbeat:Dummy --clone")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource create dummy2 Dummy --group dummyG")
+        o,r = pcs("resource create dummy2 ocf:heartbeat:Dummy --group dummyG")
         ac(o,"")
         assert r == 0
 
@@ -1418,15 +1421,19 @@ Ticket Constraints:
         assert r == 0
 
     def testCloneConstraintAutocorrect(self):
-        output, returnVal = pcs("resource create dummy1 dummy")
+        output, returnVal = pcs("resource create dummy1 ocf:heartbeat:Dummy")
         ac(output, "")
         self.assertEqual(0, returnVal)
 
-        output, returnVal = pcs("resource create dummy Dummy --clone")
+        output, returnVal = pcs(
+            "resource create dummy ocf:heartbeat:Dummy --clone"
+        )
         ac(output, "")
         self.assertEqual(0, returnVal)
 
-        output, returnVal = pcs("resource create dummy2 Dummy --group dummyG")
+        output, returnVal = pcs(
+            "resource create dummy2 ocf:heartbeat:Dummy --group dummyG"
+        )
         ac(output, "")
         self.assertEqual(0, returnVal)
 
@@ -1885,7 +1892,7 @@ Ticket Constraints:
         # deleting the remote node resource
         output, returnVal = pcs(
             temp_cib,
-            'resource create vm-guest1 VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
+            'resource create vm-guest1 ocf:heartbeat:VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -1954,7 +1961,7 @@ Ticket Constraints:
         # removing the remote node
         output, returnVal = pcs(
             temp_cib,
-            'resource create vm-guest1 VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
+            'resource create vm-guest1 ocf:heartbeat:VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -2015,7 +2022,7 @@ Deleting Resource - vm-guest1
         # deleting the remote node resource
         output, returnVal = pcs(
             temp_cib,
-            'resource create vm-guest1 VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
+            'resource create vm-guest1 ocf:heartbeat:VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -2593,8 +2600,8 @@ class ConstraintBaseTest(unittest.TestCase, AssertPcsMixin):
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
         self.pcs_runner = PcsRunner(temp_cib)
-        self.assert_pcs_success('resource create A Dummy')
-        self.assert_pcs_success('resource create B Dummy')
+        self.assert_pcs_success('resource create A ocf:heartbeat:Dummy')
+        self.assert_pcs_success('resource create B ocf:heartbeat:Dummy')
 
 
 class CommonCreateWithSet(ConstraintBaseTest):
diff --git a/pcs/test/test_lib_cib_acl.py b/pcs/test/test_lib_cib_acl.py
index efaad7e..56b48eb 100644
--- a/pcs/test/test_lib_cib_acl.py
+++ b/pcs/test/test_lib_cib_acl.py
@@ -5,22 +5,30 @@ from __future__ import (
     unicode_literals,
 )
 
-from pcs.test.tools.pcs_unittest import TestCase
+from lxml import etree
 
 from pcs.test.tools.assertions import (
     assert_raise_library_error,
+    assert_report_item_equal,
     assert_xml_equal,
+    ExtendedAssertionsMixin,
 )
 from pcs.test.tools.misc import get_test_resource as rc
 from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
+from pcs.test.tools.pcs_unittest import mock, TestCase
 
-from pcs.lib.cib import acl as lib
 from pcs.common import report_codes
-from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.lib.cib import acl as lib
+from pcs.lib.errors import (
+    LibraryError,
+    ReportItemSeverity as severities,
+)
 
 class LibraryAclTest(TestCase):
     def setUp(self):
-        self.create_cib = get_xml_manipulation_creator_from_file(rc("cib-empty.xml"))
+        self.create_cib = get_xml_manipulation_creator_from_file(
+            rc("cib-empty.xml")
+        )
         self.cib = self.create_cib()
 
     def fixture_add_role(self, role_id):
@@ -35,6 +43,185 @@ class LibraryAclTest(TestCase):
         assert_xml_equal(expected_xml, got_xml)
 
 
+class ValidatePermissionsTest(LibraryAclTest):
+    def setUp(self):
+        self.xml = """
+        <xml>
+           <test id="test-id">
+               <another id="another-id"/>
+               <last id="last-id"/>
+           </test>
+        </xml>
+        """
+        self.tree = etree.XML(self.xml)
+        self.allowed_permissions = ["read", "write", "deny"]
+        self.allowed_scopes = ["xpath", "id"]
+
+    def test_success(self):
+        permissions = [
+            ("read", "id", "test-id"),
+            ("write", "id", "another-id"),
+            ("deny", "id", "last-id"),
+            ("read", "xpath", "any string"),
+            ("write", "xpath", "maybe xpath"),
+            ("deny", "xpath", "xpath")
+        ]
+        lib.validate_permissions(self.tree, permissions)
+
+    def test_unknown_permission(self):
+        permissions = [
+            ("read", "id", "test-id"),
+            ("unknown", "id", "another-id"),
+            ("write", "xpath", "my xpath"),
+            ("allow", "xpath", "xpath")
+        ]
+        assert_raise_library_error(
+            lambda: lib.validate_permissions(self.tree, permissions),
+            (
+                severities.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_value": "unknown",
+                    "option_name": "permission",
+                    "allowed_values": self.allowed_permissions,
+                },
+                None
+            ),
+            (
+                severities.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_value": "allow",
+                    "option_name": "permission",
+                    "allowed_values": self.allowed_permissions,
+                },
+                None
+            )
+        )
+
+    def test_unknown_scope(self):
+        permissions = [
+            ("read", "id", "test-id"),
+            ("write", "not_id", "test-id"),
+            ("deny", "not_xpath", "some xpath"),
+            ("read", "xpath", "xpath")
+        ]
+        assert_raise_library_error(
+            lambda: lib.validate_permissions(self.tree, permissions),
+            (
+                severities.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_value": "not_id",
+                    "option_name": "scope type",
+                    "allowed_values": self.allowed_scopes,
+                },
+                None
+            ),
+            (
+                severities.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_value": "not_xpath",
+                    "option_name": "scope type",
+                    "allowed_values": self.allowed_scopes,
+                },
+                None
+            )
+        )
+
+    def test_not_existing_id(self):
+        permissions = [
+            ("read", "id", "test-id"),
+            ("write", "id", "id"),
+            ("deny", "id", "last"),
+            ("write", "xpath", "maybe xpath")
+        ]
+        assert_raise_library_error(
+            lambda: lib.validate_permissions(self.tree, permissions),
+            (
+                severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "id",
+                    "id_description": "id",
+                }
+            ),
+            (
+                severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "last",
+                    "id_description": "id",
+                }
+            )
+        )
+
+
+class FindRoleTest(LibraryAclTest, ExtendedAssertionsMixin):
+    def test_success(self):
+        xml = """
+        <xml>
+            <acl_role id="role-id"/>
+            <role id="role-id"/>
+        </xml>
+        """
+        assert_xml_equal(
+            '<acl_role id="role-id"/>',
+            etree.tostring(lib.find_role(etree.XML(xml), "role-id")).decode()
+        )
+
+    def test_not_exist(self):
+        xml = """
+        <xml>
+            <role id="role-id"/>
+        </xml>
+        """
+        self.assert_raises(
+            lib.AclRoleNotFound,
+            lambda: lib.find_role(etree.XML(xml), "role-id"),
+            {"role_id": "role-id"}
+        )
+
+
+class FindPermissionTest(LibraryAclTest):
+    def test_success(self):
+        xml = """
+        <cib>
+            <acls>
+                <acl_permission id="permission-id"/>
+                <acl_permission id="permission-id2"/>
+            </acls>
+        </cib>
+        """
+        assert_xml_equal(
+            '<acl_permission id="permission-id"/>',
+            etree.tostring(lib._find_permission(
+                etree.XML(xml), "permission-id")
+            ).decode()
+        )
+
+    def test_not_exist(self):
+        xml = """
+        <cib>
+            <acls>
+                <acl_permission id="permission-id2"/>
+            </acls>
+        </cib>
+        """
+        assert_raise_library_error(
+            lambda: lib._find_permission(etree.XML(xml), "permission-id"),
+            (
+                severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "permission-id",
+                    "id_description": "permission",
+                }
+            )
+        )
+
+
 class CreateRoleTest(LibraryAclTest):
     def test_create_for_new_role_id(self):
         role_id = 'new-id'
@@ -72,85 +259,334 @@ class CreateRoleTest(LibraryAclTest):
             ),
         )
 
-class AddPermissionsToRoleTest(LibraryAclTest):
-    def test_add_for_correct_permissions(self):
-        role_id = 'role1'
-        self.fixture_add_role(role_id)
+class RemoveRoleTest(LibraryAclTest, ExtendedAssertionsMixin):
+    def setUp(self):
+        self.xml = """
+        <cib>
+            <configuration>
+                <acls>
+                    <acl_role id="role-id"/>
+                    <acl_target>
+                        <role id="role-id"/>
+                    </acl_target>
+                    <acl_group>
+                        <role id="some-role"/>
+                        <role id="role-id"/>
+                    </acl_group>
+                </acls>
+            </configuration>
+        </cib>
+        """
+        self.tree = etree.XML(self.xml)
 
-        lib.add_permissions_to_role(
-            self.cib.tree, role_id, [('read', 'xpath', '/whatever')]
+    def test_success(self):
+        expected_xml = """
+        <cib>
+            <configuration>
+                <acls>
+                    <acl_target/>
+                    <acl_group>
+                        <role id="some-role"/>
+                    </acl_group>
+                </acls>
+            </configuration>
+        </cib>
+        """
+        lib.remove_role(self.tree, "role-id")
+        assert_xml_equal(expected_xml, etree.tostring(self.tree).decode())
+
+    def test_autodelete(self):
+        expected_xml = """
+        <cib>
+            <configuration>
+                <acls>
+                    <acl_group>
+                        <role id="some-role"/>
+                    </acl_group>
+                </acls>
+            </configuration>
+        </cib>
+        """
+        lib.remove_role(self.tree, "role-id", autodelete_users_groups=True)
+        assert_xml_equal(expected_xml, etree.tostring(self.tree).decode())
+
+    def test_id_not_exists(self):
+        self.assert_raises(
+            lib.AclRoleNotFound,
+            lambda: lib.remove_role(self.tree, "id-of-role"),
+            {"role_id": "id-of-role"}
         )
 
-        self.assert_cib_equal(
-            self.create_cib().append_to_first_tag_name('configuration', '''
-              <acls>
-                <acl_role id="{0}">
-                  <acl_permission id="{0}-read" kind="read" xpath="/whatever"/>
-                </acl_role>
-              </acls>
-            '''.format(role_id))
+
+class AssignRoleTest(LibraryAclTest):
+    def setUp(self):
+        LibraryAclTest.setUp(self)
+        self.cib.append_to_first_tag_name(
+            "configuration",
+            """
+                <acls>
+                    <acl_role id="role1"/>
+                    <acl_role id="role2"/>
+                    <acl_target id="target1">
+                        <role id="role2"/>
+                    </acl_target>
+                    <acl_group id="group1"/>
+                </acls>
+            """
         )
 
+    def test_success_target(self):
+        target = self.cib.tree.find(
+            ".//acl_target[@id='{0}']".format("target1")
+        )
+        role = self.cib.tree.find(".//acl_role[@id='{0}']".format("role1"))
+        lib.assign_role(target, role)
+        self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
+            "configuration",
+            """
+                <acls>
+                    <acl_role id="role1"/>
+                    <acl_role id="role2"/>
+                    <acl_target id="target1">
+                        <role id="role2"/>
+                        <role id="role1"/>
+                    </acl_target>
+                    <acl_group id="group1"/>
+                </acls>
+            """
+        ))
 
-    def test_refuse_add_for_nonexistent_role_id(self):
-        role_id = 'role1'
+    def test_sucess_group(self):
+        group = self.cib.tree.find(".//acl_group[@id='{0}']".format("group1"))
+        role = self.cib.tree.find(".//acl_role[@id='{0}']".format("role1"))
+        lib.assign_role(group, role)
+        self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
+            "configuration",
+            """
+                <acls>
+                    <acl_role id="role1"/>
+                    <acl_role id="role2"/>
+                    <acl_target id="target1">
+                        <role id="role2"/>
+                    </acl_target>
+                    <acl_group id="group1">
+                        <role id="role1"/>
+                    </acl_group>
+                </acls>
+            """
+        ))
+
+    def test_role_already_assigned(self):
+        target = self.cib.tree.find(
+            ".//acl_target[@id='{0}']".format("target1")
+        )
+        role = self.cib.tree.find(".//acl_role[@id='{0}']".format("role2"))
         assert_raise_library_error(
-            lambda: lib.add_permissions_to_role(
-                self.cib.tree, role_id, [('read', 'xpath', '/whatever')]
-            ),
+            lambda: lib.assign_role(target, role),
             (
                 severities.ERROR,
-                report_codes.ID_NOT_FOUND,
+                report_codes.CIB_ACL_ROLE_IS_ALREADY_ASSIGNED_TO_TARGET,
                 {
-                    "id": role_id,
-                    "id_description": "role",
+                    "role_id": "role2",
+                    "target_id": "target1",
                 }
-            ),
+            )
         )
 
-    def test_refuse_bad_permission_and_bad_scope_type(self):
-        role_id = 'role1'
-        self.fixture_add_role(role_id)
 
+class UnassignRoleTest(LibraryAclTest):
+    def setUp(self):
+        LibraryAclTest.setUp(self)
+        self.cib.append_to_first_tag_name(
+            "configuration",
+            """
+                <acls>
+                    <acl_role id="role1"/>
+                    <acl_role id="role2"/>
+                    <acl_target id="target1">
+                        <role id="role3"/>
+                        <role id="role2"/>
+                    </acl_target>
+                    <acl_group id="group1">
+                        <role id="role1"/>
+                    </acl_group>
+                </acls>
+            """
+        )
+
+    def test_success_target(self):
+        target = self.cib.tree.find(
+            ".//acl_target[@id='{0}']".format("target1")
+        )
+        lib.unassign_role(target, "role2")
+        self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
+            "configuration",
+            """
+                <acls>
+                    <acl_role id="role1"/>
+                    <acl_role id="role2"/>
+                    <acl_target id="target1">
+                        <role id="role3"/>
+                    </acl_target>
+                    <acl_group id="group1">
+                        <role id="role1"/>
+                    </acl_group>
+                </acls>
+            """
+        ))
+
+    def test_success_group(self):
+        group = self.cib.tree.find(".//acl_group[@id='{0}']".format("group1"))
+        lib.unassign_role(group, "role1")
+        self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
+            "configuration",
+            """
+                <acls>
+                    <acl_role id="role1"/>
+                    <acl_role id="role2"/>
+                    <acl_target id="target1">
+                        <role id="role3"/>
+                        <role id="role2"/>
+                    </acl_target>
+                    <acl_group id="group1"/>
+                </acls>
+            """
+        ))
+
+    def test_not_existing_role(self):
+        target = self.cib.tree.find(
+            ".//acl_target[@id='{0}']".format("target1")
+        )
+        lib.unassign_role(target, "role3")
+        self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
+            "configuration",
+            """
+                <acls>
+                    <acl_role id="role1"/>
+                    <acl_role id="role2"/>
+                    <acl_target id="target1">
+                        <role id="role2"/>
+                    </acl_target>
+                    <acl_group id="group1">
+                        <role id="role1"/>
+                    </acl_group>
+                </acls>
+            """
+        ))
+
+    def test_role_not_assigned(self):
+        target = self.cib.tree.find(
+            ".//acl_target[@id='{0}']".format("target1")
+        )
         assert_raise_library_error(
-            lambda: lib.add_permissions_to_role(
-                self.cib.tree, role_id, [('readX', 'xpathX', '/whatever')]
-            ),
-            (
-                severities.ERROR,
-                report_codes.INVALID_OPTION_VALUE,
-                {
-                    "option_name": "permission",
-                    "option_value": "readX",
-                    "allowed_values": ["read", "write", "deny"],
-                }
-            ),
+            lambda: lib.unassign_role(target, "role1"),
             (
                 severities.ERROR,
-                report_codes.INVALID_OPTION_VALUE,
+                report_codes.CIB_ACL_ROLE_IS_NOT_ASSIGNED_TO_TARGET,
                 {
-                    "option_name": "scope type",
-                    "option_value": "xpathX",
-                    "allowed_values": ["xpath", "id"],
+                    "role_id": "role1",
+                    "target_id": "target1",
                 }
-            ),
+            )
         )
 
-    def test_refuse_pointing_to_nonexisten_id(self):
+    def test_autodelete(self):
+        target = self.cib.tree.find(".//acl_group[@id='{0}']".format("group1"))
+        lib.unassign_role(target, "role1", True)
+        self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
+            "configuration",
+            """
+                <acls>
+                    <acl_role id="role1"/>
+                    <acl_role id="role2"/>
+                    <acl_target id="target1">
+                        <role id="role3"/>
+                        <role id="role2"/>
+                    </acl_target>
+                </acls>
+            """
+        ))
+
+
+class FindTargetTest(LibraryAclTest, ExtendedAssertionsMixin):
+    def setUp(self):
+        LibraryAclTest.setUp(self)
+        self.cib.append_to_first_tag_name(
+            "configuration", '<acl_target id="target1"/>'
+        )
+        self.cib.append_to_first_tag_name(
+            "configuration",
+            """
+                <acls>
+                    <acl_target id="target1" description="test"/>
+                </acls>
+            """
+        )
+
+    def test_success(self):
+        assert_xml_equal(
+            '<acl_target id="target1" description="test"/>',
+            etree.tostring(lib.find_target(self.cib.tree, "target1")).decode()
+        )
+
+    def test_not_found(self):
+        self.assert_raises(
+            lib.AclTargetNotFound,
+            lambda: lib.find_target(self.cib.tree, "target2"),
+            {"target_id": "target2"}
+        )
+
+
+class FindGroupTest(LibraryAclTest, ExtendedAssertionsMixin):
+    def setUp(self):
+        LibraryAclTest.setUp(self)
+        self.cib.append_to_first_tag_name(
+            "configuration", '<acl_group id="group2"/>'
+        )
+        self.cib.append_to_first_tag_name(
+            "configuration",
+            """
+                <acls>
+                    <acl_group id="group1" description="desc"/>
+                </acls>
+            """
+        )
+
+    def test_success(self):
+        assert_xml_equal(
+            '<acl_group id="group1" description="desc"/>',
+            etree.tostring(lib.find_group(self.cib.tree, "group1")).decode()
+        )
+
+    def test_not_found(self):
+        self.assert_raises(
+            lib.AclGroupNotFound,
+            lambda: lib.find_group(self.cib.tree, "group2"),
+            {"group_id": "group2"}
+        )
+
+
+class AddPermissionsToRoleTest(LibraryAclTest):
+    def test_add_for_correct_permissions(self):
         role_id = 'role1'
         self.fixture_add_role(role_id)
 
-        assert_raise_library_error(
-            lambda: lib.add_permissions_to_role(
-                self.cib.tree, role_id, [('read', 'id', 'non-existent')]
-            ),
-            (
-                severities.ERROR,
-                report_codes.ID_NOT_FOUND,
-                {'id': 'non-existent'}
-            ),
+        lib.add_permissions_to_role(
+            self.cib.tree.find(".//acl_role[@id='{0}']".format(role_id)),
+            [('read', 'xpath', '/whatever')]
         )
 
+        self.assert_cib_equal(
+            self.create_cib().append_to_first_tag_name('configuration', '''
+              <acls>
+                <acl_role id="{0}">
+                  <acl_permission id="{0}-read" kind="read" xpath="/whatever"/>
+                </acl_role>
+              </acls>
+            '''.format(role_id))
+        )
+
+
 class ProvideRoleTest(LibraryAclTest):
     def test_add_role_for_nonexisting_id(self):
         role_id = 'new-id'
@@ -178,6 +614,134 @@ class ProvideRoleTest(LibraryAclTest):
             '''.format(role_id))
         )
 
+
+class CreateTargetTest(LibraryAclTest):
+    def setUp(self):
+        LibraryAclTest.setUp(self)
+        self.fixture_add_role("target3")
+        self.cib.append_to_first_tag_name("acls", '<acl_target id="target2"/>')
+
+    def test_success(self):
+        lib.create_target(self.cib.tree, "target1")
+        self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
+            "configuration",
+            """
+            <acls>
+                <acl_role id="target3"/>
+                <acl_target id="target2"/>
+                <acl_target id="target1"/>
+            </acls>
+            """
+        ))
+
+    def test_target_id_is_not_unique_id(self):
+        lib.create_target(self.cib.tree, "target3")
+        self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
+            "configuration",
+            """
+            <acls>
+                <acl_role id="target3"/>
+                <acl_target id="target2"/>
+                <acl_target id="target3"/>
+            </acls>
+            """
+        ))
+
+    def test_target_id_is_not_unique_target_id(self):
+        assert_raise_library_error(
+            lambda: lib.create_target(self.cib.tree, "target2"),
+            (
+                severities.ERROR,
+                report_codes.CIB_ACL_TARGET_ALREADY_EXISTS,
+                {"target_id":"target2"}
+            )
+        )
+
+
+class CreateGroupTest(LibraryAclTest):
+    def setUp(self):
+        LibraryAclTest.setUp(self)
+        self.fixture_add_role("group2")
+
+    def test_success(self):
+        lib.create_group(self.cib.tree, "group1")
+        self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
+            "configuration",
+            """
+            <acls>
+                <acl_role id="group2"/>
+                <acl_group id="group1"/>
+            </acls>
+            """
+        ))
+
+    def test_existing_id(self):
+        assert_raise_library_error(
+            lambda: lib.create_group(self.cib.tree, "group2"),
+            (
+                severities.ERROR,
+                report_codes.ID_ALREADY_EXISTS,
+                {"id": "group2"}
+            )
+        )
+
+
+class RemoveTargetTest(LibraryAclTest, ExtendedAssertionsMixin):
+    def setUp(self):
+        LibraryAclTest.setUp(self)
+        self.fixture_add_role("target2")
+        self.cib.append_to_first_tag_name("acls", '<acl_target id="target1"/>')
+
+    def test_success(self):
+        lib.remove_target(self.cib.tree, "target1")
+        self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
+            "configuration",
+            """
+            <acls>
+                <acl_role id="target2"/>
+            </acls>
+            """
+        ))
+
+    def test_not_existing(self):
+        assert_raise_library_error(
+            lambda: lib.remove_target(self.cib.tree, "target2"),
+            (
+                severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {"id": "target2"}
+            )
+        )
+
+
+class RemoveGroupTest(LibraryAclTest, ExtendedAssertionsMixin):
+    def setUp(self):
+        LibraryAclTest.setUp(self)
+        self.fixture_add_role("group2")
+        self.cib.append_to_first_tag_name("acls", '<acl_group id="group1"/>')
+
+    def test_success(self):
+        lib.remove_group(self.cib.tree, "group1")
+        self.assert_cib_equal(self.create_cib().append_to_first_tag_name(
+            "configuration",
+            """
+            <acls>
+                <acl_role id="group2"/>
+            </acls>
+            """
+        ))
+
+    def test_not_existing(self):
+        assert_raise_library_error(
+            lambda: lib.remove_group(self.cib.tree, "group2"),
+            (
+                severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {"id": "group2"}
+            )
+        )
+
+
 class RemovePermissionForReferenceTest(LibraryAclTest):
     def test_has_no_efect_when_id_not_referenced(self):
         lib.remove_permissions_referencing(self.cib.tree, 'dummy')
@@ -212,3 +776,309 @@ class RemovePermissionForReferenceTest(LibraryAclTest):
               </acls>
             ''')
         )
+
+
+class RemovePermissionTest(LibraryAclTest):
+    def setUp(self):
+        self.xml = """
+        <cib>
+            <configuration>
+                <acls>
+                    <acl_role id="role-id">
+                        <acl_permission id="role-permission"/>
+                        <acl_permission id="permission-id"/>
+                    </acl_role>
+                </acls>
+            </configuration>
+        </cib>
+        """
+        self.tree = etree.XML(self.xml)
+
+    def test_success(self):
+        expected_xml = """
+        <cib>
+            <configuration>
+                <acls>
+                    <acl_role id="role-id">
+                        <acl_permission id="role-permission"/>
+                    </acl_role>
+                </acls>
+            </configuration>
+        </cib>
+        """
+        lib.remove_permission(self.tree, "permission-id")
+        assert_xml_equal(expected_xml, etree.tostring(self.tree).decode())
+
+    def test_not_existing_id(self):
+        assert_raise_library_error(
+            lambda: lib.remove_permission(self.tree, "role-id"),
+            (
+                severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "role-id",
+                    "id_description": "permission",
+                }
+            )
+        )
+
+
+class GetRoleListTest(LibraryAclTest):
+    def test_success(self):
+        self.cib.append_to_first_tag_name(
+            "configuration",
+            """
+            <acls>
+                <acl_role id="role1" description="desc1">
+                    <acl_permission id="role1-perm1" kind="read" xpath="XPATH"/>
+                    <acl_permission
+                        id="role1-perm2"
+                        description="desc"
+                        kind="write"
+                        reference="id"
+                    />
+                    <acl_permission
+                        id="role1-perm3"
+                        kind="deny"
+                        object-type="type"
+                        attribute="attr"
+                    />
+                </acl_role>
+                <acl_target id="target1"/>
+                <acl_role id="role2"/>
+            </acls>
+            """
+        )
+        expected = [
+            {
+                "id": "role1",
+                "description": "desc1",
+                "permission_list": [
+                    {
+                        "id": "role1-perm1",
+                        "description": None,
+                        "kind": "read",
+                        "xpath": "XPATH",
+                        "reference": None,
+                        "object-type": None,
+                        "attribute": None,
+                    },
+                    {
+                        "id": "role1-perm2",
+                        "description": "desc",
+                        "kind": "write",
+                        "xpath": None,
+                        "reference": "id",
+                        "object-type": None,
+                        "attribute": None,
+                    },
+                    {
+                        "id": "role1-perm3",
+                        "description": None,
+                        "kind": "deny",
+                        "xpath": None,
+                        "reference": None,
+                        "object-type": "type",
+                        "attribute": "attr",
+                    }
+                ]
+            },
+            {
+                "id": "role2",
+                "description": None,
+                "permission_list": [],
+            }
+        ]
+        self.assertEqual(expected, lib.get_role_list(self.cib.tree))
+
+
+class GetPermissionListTest(LibraryAclTest):
+    def test_success(self):
+        role_el = etree.Element("acl_role")
+        etree.SubElement(
+            role_el,
+            "acl_permission",
+            {
+                "id":"role1-perm1",
+                "kind": "read",
+                "xpath": "XPATH",
+            }
+        )
+        etree.SubElement(
+            role_el,
+            "acl_permission",
+            {
+                "id": "role1-perm2",
+                "description": "desc",
+                "kind": "write",
+                "reference": "id",
+            }
+        )
+        etree.SubElement(
+            role_el,
+            "acl_permission",
+            {
+                "id": "role1-perm3",
+                "kind": "deny",
+                "object-type": "type",
+                "attribute": "attr",
+            }
+        )
+        expected = [
+            {
+                "id": "role1-perm1",
+                "description": None,
+                "kind": "read",
+                "xpath": "XPATH",
+                "reference": None,
+                "object-type": None,
+                "attribute": None,
+            },
+            {
+                "id": "role1-perm2",
+                "description": "desc",
+                "kind": "write",
+                "xpath": None,
+                "reference": "id",
+                "object-type": None,
+                "attribute": None,
+            },
+            {
+                "id": "role1-perm3",
+                "description": None,
+                "kind": "deny",
+                "xpath": None,
+                "reference": None,
+                "object-type": "type",
+                "attribute": "attr",
+            }
+        ]
+        self.assertEqual(expected, lib._get_permission_list(role_el))
+
+
+ at mock.patch("pcs.lib.cib.acl._get_target_like_list_with_tag")
+class GetTargetListTest(TestCase):
+    def test_success(self, mock_fn):
+        mock_fn.return_value = "returned data"
+        self.assertEqual("returned data", lib.get_target_list("tree"))
+        mock_fn.assert_called_once_with("tree", "acl_target")
+
+
+ at mock.patch("pcs.lib.cib.acl._get_target_like_list_with_tag")
+class GetGroupListTest(TestCase):
+    def test_success(self, mock_fn):
+        mock_fn.return_value = "returned data"
+        self.assertEqual("returned data", lib.get_group_list("tree"))
+        mock_fn.assert_called_once_with("tree", "acl_group")
+
+
+class GetTargetLikeListWithTagTest(LibraryAclTest):
+    def setUp(self):
+        LibraryAclTest.setUp(self)
+        self.cib.append_to_first_tag_name(
+            "configuration",
+            """
+                <acls>
+                    <acl_target id="target1"/>
+                    <acl_group id="group1">
+                        <role id="role1"/>
+                    </acl_group>
+                    <acl_target id="target2">
+                        <role id="role1"/>
+                        <role id="role2"/>
+                        <role id="role3"/>
+                    </acl_target>
+                    <acl_group id="group2"/>
+                </acls>
+            """
+        )
+
+    def test_success_targets(self):
+        self.assertEqual(
+            [
+                {
+                    "id": "target1",
+                    "role_list": [],
+                },
+                {
+                    "id": "target2",
+                    "role_list": ["role1", "role2", "role3"],
+                }
+            ],
+            lib._get_target_like_list_with_tag(self.cib.tree, "acl_target")
+        )
+
+    def test_success_groups(self):
+        self.assertEqual(
+            [
+                {
+                    "id": "group1",
+                    "role_list": ["role1"],
+                },
+                {
+                    "id": "group2",
+                    "role_list": [],
+                }
+            ],
+            lib._get_target_like_list_with_tag(self.cib.tree, "acl_group")
+        )
+
+
+class GetRoleListOfTargetTest(LibraryAclTest):
+    def test_success(self):
+        target_el = etree.Element("target")
+        etree.SubElement(target_el, "role", {"id": "role1"})
+        etree.SubElement(target_el, "role", {"id": "role2"})
+        etree.SubElement(target_el, "role")
+        etree.SubElement(target_el, "role", {"id": "role3"})
+        self.assertEqual(
+            ["role1", "role2", "role3"], lib._get_role_list_of_target(target_el)
+        )
+
+
+class AclErrorToReportItemTest(TestCase, ExtendedAssertionsMixin):
+    def test_acl_target_not_found(self):
+        assert_report_item_equal(
+            lib.acl_error_to_report_item(lib.AclTargetNotFound("id")),
+            (
+                severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "id",
+                    "id_description": "user",
+                }
+            )
+        )
+
+    def test_acl_group_not_found(self):
+        assert_report_item_equal(
+            lib.acl_error_to_report_item(lib.AclGroupNotFound("id")),
+            (
+                severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "id",
+                    "id_description": "group",
+                }
+            )
+        )
+
+    def test_acl_role_not_found(self):
+        assert_report_item_equal(
+            lib.acl_error_to_report_item(lib.AclRoleNotFound("id")),
+            (
+                severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "id",
+                    "id_description": "role",
+                }
+            )
+        )
+
+    def test_unknown_exception(self):
+        self.assert_raises(
+            LibraryError,
+            lambda: lib.acl_error_to_report_item(LibraryError())
+        )
+
diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py
index ec9c312..8c85b5b 100644
--- a/pcs/test/test_lib_cib_tools.py
+++ b/pcs/test/test_lib_cib_tools.py
@@ -74,6 +74,33 @@ class DoesIdExistTest(CibToolsTest):
         self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1ba"))
         self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1bb"))
 
+    def test_ignore_acl_target(self):
+        self.cib.append_to_first_tag_name(
+            "configuration",
+            """
+            <acls>
+                <acl_target id="target1"/>
+            </acls>
+            """
+        )
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "target1"))
+
+    def test_ignore_acl_role_references(self):
+        self.cib.append_to_first_tag_name(
+            "configuration",
+            """
+            <acls>
+                <acl_target id="target1">
+                    <role id="role1"/>
+                    <role id="role2"/>
+                </acl_target>
+            </acls>
+            """
+        )
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "role1"))
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "role2"))
+
+
 class FindUniqueIdTest(CibToolsTest):
     def test_already_unique(self):
         self.fixture_add_primitive_with_id("myId")
@@ -96,6 +123,23 @@ class FindUniqueIdTest(CibToolsTest):
         self.fixture_add_primitive_with_id("myId-3")
         self.assertEqual("myId-2", lib.find_unique_id(self.cib.tree, "myId"))
 
+class CreateNvsetIdTest(TestCase):
+    def test_create_plain_id_when_no_confilicting_id_there(self):
+        context = etree.fromstring('<cib><a id="b"/></cib>')
+        self.assertEqual(
+            "b-name",
+            lib.create_subelement_id(context.find(".//a"), "name")
+        )
+
+    def test_create_decorated_id_when_conflicting_id_there(self):
+        context = etree.fromstring(
+            '<cib><a id="b"><c id="b-name"/></a></cib>'
+        )
+        self.assertEqual(
+            "b-name-1",
+            lib.create_subelement_id(context.find(".//a"), "name")
+        )
+
 class GetConfigurationTest(CibToolsTest):
     def test_success_if_exists(self):
         self.assertEqual(
@@ -462,3 +506,46 @@ class UpgradeCibTest(TestCase):
         mock_file.seek.assert_called_once_with(0)
         mock_file.read.assert_called_once_with()
 
+
+class EtreeElementAttributesToDictTest(TestCase):
+    def setUp(self):
+        self.el = etree.Element(
+            "test_element",
+            {
+                "id": "test_id",
+                "description": "some description",
+                "attribute": "value",
+            }
+        )
+
+    def test_only_existing(self):
+        self.assertEqual(
+            {
+                "id": "test_id",
+                "attribute": "value",
+            },
+            lib.etree_element_attibutes_to_dict(self.el, ["id", "attribute"])
+        )
+
+    def test_only_not_existing(self):
+        self.assertEqual(
+            {
+                "_id": None,
+                "not_existing": None,
+            },
+            lib.etree_element_attibutes_to_dict(
+                self.el, ["_id", "not_existing"]
+            )
+        )
+
+    def test_mix(self):
+        self.assertEqual(
+            {
+                "id": "test_id",
+                "attribute": "value",
+                "not_existing": None,
+            },
+            lib.etree_element_attibutes_to_dict(
+                self.el, ["id", "not_existing", "attribute"]
+            )
+        )
diff --git a/pcs/test/test_lib_commands_sbd.py b/pcs/test/test_lib_commands_sbd.py
index f8146ce..d323252 100644
--- a/pcs/test/test_lib_commands_sbd.py
+++ b/pcs/test/test_lib_commands_sbd.py
@@ -66,7 +66,6 @@ class ValidateSbdOptionsTest(TestCase):
         self.allowed_sbd_options = sorted([
             "SBD_DELAY_START", "SBD_STARTMODE", "SBD_WATCHDOG_TIMEOUT"
         ])
-        self.allowed_sbd_options_str = ", ".join(self.allowed_sbd_options)
 
     def test_all_ok(self):
         config = {
@@ -94,7 +93,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_UNKNOWN",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     report_codes.FORCE_OPTIONS
                 ),
@@ -105,7 +103,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "another_unknown_option",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     report_codes.FORCE_OPTIONS
                 )
@@ -131,7 +128,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_UNKNOWN",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     None
                 ),
@@ -142,7 +138,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "another_unknown_option",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     None
                 )
@@ -169,7 +164,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_WATCHDOG_DEV",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     None
                 ),
@@ -180,7 +174,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_OPTS",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     None
                 )
@@ -207,7 +200,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_WATCHDOG_DEV",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     None
                 ),
@@ -218,7 +210,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_OPTS",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     None
                 ),
@@ -229,7 +220,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_UNKNOWN",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     report_codes.FORCE_OPTIONS
                 )
@@ -257,7 +247,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_WATCHDOG_DEV",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     None
                 ),
@@ -268,7 +257,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_OPTS",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     None
                 ),
@@ -279,7 +267,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_UNKNOWN",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     None
                 ),
@@ -290,7 +277,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_PACEMAKER",
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
-                        "allowed_str": self.allowed_sbd_options_str
                     },
                     None
                 )
@@ -319,7 +305,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_WATCHDOG_TIMEOUT",
                         "option_value": "-1",
                         "allowed_values": "nonnegative integer",
-                        "allowed_values_str": "nonnegative integer",
                     },
                     None
                 )
@@ -341,7 +326,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_WATCHDOG_TIMEOUT",
                         "option_value": "not int",
                         "allowed_values": "nonnegative integer",
-                        "allowed_values_str": "nonnegative integer",
                     },
                     None
                 )
@@ -363,7 +347,6 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_name": "SBD_WATCHDOG_TIMEOUT",
                         "option_value": None,
                         "allowed_values": "nonnegative integer",
-                        "allowed_values_str": "nonnegative integer",
                     },
                     None
                 )
@@ -584,6 +567,15 @@ class GetClusterSbdStatusTest(CommandSbdTest):
                 ),
                 (
                     Severities.WARNING,
+                    report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+                    {
+                        "node": "node1",
+                        "reason": "reason",
+                        "command": "command",
+                    }
+                ),
+                (
+                    Severities.WARNING,
                     report_codes.UNABLE_TO_GET_SBD_STATUS,
                     {"node": "node1"}
                 ),
@@ -595,18 +587,6 @@ class GetClusterSbdStatusTest(CommandSbdTest):
             ]
         )
 
-    def test_cman_cluster(self, mock_check_sbd, mock_get_nodes):
-        self.mock_env.is_cman_cluster = True
-        assert_raise_library_error(
-            lambda: cmd_sbd.get_cluster_sbd_status(self.mock_env),
-            (
-                Severities.ERROR,
-                report_codes.CMAN_UNSUPPORTED_COMMAND,
-                {}
-            )
-        )
-
-
 @mock.patch("pcs.lib.commands.sbd._get_cluster_nodes")
 @mock.patch("pcs.lib.sbd.get_sbd_config")
 class GetClusterSbdConfigTest(CommandSbdTest):
@@ -718,23 +698,22 @@ invalid value
         self.assertEqual(3, mock_sbd_cfg.call_count)
         assert_report_item_list_equal(
             self.mock_rep.report_item_list,
-            [(
-                Severities.WARNING,
-                report_codes.UNABLE_TO_GET_SBD_CONFIG,
-                {"node": "node2"}
-            )]
-        )
-
-
-    def test_cman_cluster(self, mock_sbd_cfg, mock_get_nodes):
-        self.mock_env.is_cman_cluster = True
-        assert_raise_library_error(
-            lambda: cmd_sbd.get_cluster_sbd_config(self.mock_env),
-            (
-                Severities.ERROR,
-                report_codes.CMAN_UNSUPPORTED_COMMAND,
-                {}
-            )
+            [
+                (
+                    Severities.WARNING,
+                    report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+                    {
+                        "node": "node2",
+                        "reason": "reason",
+                        "command": "command",
+                    }
+                ),
+                (
+                    Severities.WARNING,
+                    report_codes.UNABLE_TO_GET_SBD_CONFIG,
+                    {"node": "node2"}
+                ),
+            ]
         )
 
 
@@ -762,23 +741,10 @@ SBD_WATCHDOG_TIMEOUT=0
         )
         self.assertEqual(1, mock_config.call_count)
 
-    def test_cman_cluster(self, mock_config):
-        self.mock_env.is_cman_cluster = True
-        assert_raise_library_error(
-            lambda: cmd_sbd.get_local_sbd_config(self.mock_env),
-            (
-                Severities.ERROR,
-                report_codes.CMAN_UNSUPPORTED_COMMAND,
-                {}
-            )
-        )
-        self.assertEqual(0, mock_config.call_count)
-
     def test_file_error(self, mock_config):
         self.mock_env.is_cman_cluster = False
         mock_config.side_effect = LibraryError(ReportItem.error(
             report_codes.UNABLE_TO_GET_SBD_CONFIG,
-            "message"
         ))
         assert_raise_library_error(
             lambda: cmd_sbd.get_local_sbd_config(self.mock_env),
diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py
index f03d78b..3a4ecce 100644
--- a/pcs/test/test_lib_corosync_live.py
+++ b/pcs/test/test_lib_corosync_live.py
@@ -47,6 +47,31 @@ class GetLocalCorosyncConfTest(TestCase):
         )
 
 
+class GetLocalClusterConfTest(TestCase):
+    def test_success(self):
+        path = rc("cluster.conf")
+        settings.cluster_conf_file = path
+        self.assertEqual(
+            lib.get_local_cluster_conf(),
+            open(path).read()
+        )
+
+    def test_error(self):
+        path = rc("cluster.conf.nonexistent")
+        settings.cluster_conf_file = path
+        assert_raise_library_error(
+            lib.get_local_cluster_conf,
+            (
+                severity.ERROR,
+                report_codes.CLUSTER_CONF_READ_ERROR,
+                {
+                    "path": path,
+                    "reason": "No such file or directory",
+                }
+            )
+        )
+
+
 class SetRemoteCorosyncConfTest(TestCase):
     def test_success(self):
         config = "test {\nconfig: data\n}\n"
diff --git a/pcs/test/test_lib_env.py b/pcs/test/test_lib_env.py
index 205fd60..05c70d4 100644
--- a/pcs/test/test_lib_env.py
+++ b/pcs/test/test_lib_env.py
@@ -21,6 +21,7 @@ from pcs.test.tools.pcs_unittest import mock
 from pcs.lib.env import LibraryEnvironment
 from pcs.common import report_codes
 from pcs.lib import reports
+from pcs.lib.cluster_conf_facade import ClusterConfFacade
 from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
 from pcs.lib.errors import (
     LibraryError,
@@ -551,7 +552,9 @@ class LibraryEnvironmentTest(TestCase):
         mock_runner.assert_called_once_with(
             self.mock_logger,
             self.mock_reporter,
-            {}
+            {
+                "LC_ALL": "C",
+            }
         )
 
     @mock.patch("pcs.lib.env.CommandRunner")
@@ -569,7 +572,10 @@ class LibraryEnvironmentTest(TestCase):
         mock_runner.assert_called_once_with(
             self.mock_logger,
             self.mock_reporter,
-            {"CIB_user": user}
+            {
+                "CIB_user": user,
+                "LC_ALL": "C",
+            }
         )
 
     @mock.patch("pcs.lib.env.NodeCommunicator")
@@ -610,3 +616,44 @@ class LibraryEnvironmentTest(TestCase):
             user,
             groups
         )
+
+    @mock.patch("pcs.lib.env.get_local_cluster_conf")
+    def test_get_cluster_conf_live(self, mock_get_local_cluster_conf):
+        env = LibraryEnvironment(
+            self.mock_logger, self.mock_reporter, cluster_conf_data=None
+        )
+        mock_get_local_cluster_conf.return_value = "cluster.conf data"
+        self.assertEqual("cluster.conf data", env.get_cluster_conf_data())
+        mock_get_local_cluster_conf.assert_called_once_with()
+
+    @mock.patch("pcs.lib.env.get_local_cluster_conf")
+    def test_get_cluster_conf_not_live(self, mock_get_local_cluster_conf):
+        env = LibraryEnvironment(
+            self.mock_logger, self.mock_reporter, cluster_conf_data="data"
+        )
+        self.assertEqual("data", env.get_cluster_conf_data())
+        self.assertEqual(0, mock_get_local_cluster_conf.call_count)
+
+    @mock.patch.object(
+        LibraryEnvironment,
+        "get_cluster_conf_data",
+        lambda self: "<cluster/>"
+    )
+    def test_get_cluster_conf(self):
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        facade_obj = env.get_cluster_conf()
+        self.assertTrue(isinstance(facade_obj, ClusterConfFacade))
+        assert_xml_equal(
+            '<cluster/>', etree.tostring(facade_obj._config).decode()
+        )
+
+    def test_is_cluster_conf_live_live(self):
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.assertTrue(env.is_cluster_conf_live)
+
+    def test_is_cluster_conf_live_not_live(self):
+        env = LibraryEnvironment(
+            self.mock_logger, self.mock_reporter, cluster_conf_data="data"
+        )
+        self.assertFalse(env.is_cluster_conf_live)
+
diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
index 6f60d7c..d900ced 100644
--- a/pcs/test/test_lib_external.py
+++ b/pcs/test/test_lib_external.py
@@ -40,6 +40,11 @@ from pcs.lib.errors import (
 import pcs.lib.external as lib
 
 
+_chkconfig = settings.chkconfig_binary
+_service = settings.service_binary
+_systemctl = settings.systemctl_binary
+
+
 @mock.patch("subprocess.Popen", autospec=True)
 class CommandRunnerTest(TestCase):
     def setUp(self):
@@ -151,7 +156,7 @@ Return value: {1}
         self.assert_popen_called_with(
             mock_popen,
             command,
-            {"env": {"a": "a", "b": "b", "c": "C"}, "stdin": None,}
+            {"env": {"a": "a", "b": "B", "c": "C"}, "stdin": None,}
         )
         logger_calls = [
             mock.call("Running: {0}".format(command_str)),
@@ -1056,7 +1061,7 @@ class DisableServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "Removed symlink", 0)
         lib.disable_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "disable", self.service + ".service"]
+            [_systemctl, "disable", self.service + ".service"]
         )
 
     def test_systemctl_failed(self, mock_is_installed, mock_systemctl):
@@ -1068,7 +1073,7 @@ class DisableServiceTest(TestCase):
             lambda: lib.disable_service(self.mock_runner, self.service)
         )
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "disable", self.service + ".service"]
+            [_systemctl, "disable", self.service + ".service"]
         )
 
     def test_not_systemctl(self, mock_is_installed, mock_systemctl):
@@ -1077,7 +1082,7 @@ class DisableServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "", 0)
         lib.disable_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
-            ["chkconfig", self.service, "off"]
+            [_chkconfig, self.service, "off"]
         )
 
     def test_not_systemctl_failed(self, mock_is_installed, mock_systemctl):
@@ -1089,7 +1094,7 @@ class DisableServiceTest(TestCase):
             lambda: lib.disable_service(self.mock_runner, self.service)
         )
         self.mock_runner.run.assert_called_once_with(
-            ["chkconfig", self.service, "off"]
+            [_chkconfig, self.service, "off"]
         )
 
     def test_systemctl_not_installed(
@@ -1114,7 +1119,7 @@ class DisableServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "Removed symlink", 0)
         lib.disable_service(self.mock_runner, self.service, instance="test")
         self.mock_runner.run.assert_called_once_with([
-            "systemctl",
+            _systemctl,
             "disable",
             "{0}@{1}.service".format(self.service, "test")
         ])
@@ -1125,7 +1130,7 @@ class DisableServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "", 0)
         lib.disable_service(self.mock_runner, self.service, instance="test")
         self.mock_runner.run.assert_called_once_with(
-            ["chkconfig", self.service, "off"]
+            [_chkconfig, self.service, "off"]
         )
 
 @mock.patch("pcs.lib.external.is_systemctl")
@@ -1139,7 +1144,7 @@ class EnableServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "Created symlink", 0)
         lib.enable_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "enable", self.service + ".service"]
+            [_systemctl, "enable", self.service + ".service"]
         )
 
     def test_systemctl_failed(self, mock_systemctl):
@@ -1150,7 +1155,7 @@ class EnableServiceTest(TestCase):
             lambda: lib.enable_service(self.mock_runner, self.service)
         )
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "enable", self.service + ".service"]
+            [_systemctl, "enable", self.service + ".service"]
         )
 
     def test_not_systemctl(self, mock_systemctl):
@@ -1158,7 +1163,7 @@ class EnableServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "", 0)
         lib.enable_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
-            ["chkconfig", self.service, "on"]
+            [_chkconfig, self.service, "on"]
         )
 
     def test_not_systemctl_failed(self, mock_systemctl):
@@ -1169,7 +1174,7 @@ class EnableServiceTest(TestCase):
             lambda: lib.enable_service(self.mock_runner, self.service)
         )
         self.mock_runner.run.assert_called_once_with(
-            ["chkconfig", self.service, "on"]
+            [_chkconfig, self.service, "on"]
         )
 
     def test_instance_systemctl(self, mock_systemctl):
@@ -1177,7 +1182,7 @@ class EnableServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "Created symlink", 0)
         lib.enable_service(self.mock_runner, self.service, instance="test")
         self.mock_runner.run.assert_called_once_with([
-            "systemctl",
+            _systemctl,
             "enable",
             "{0}@{1}.service".format(self.service, "test")
         ])
@@ -1187,7 +1192,7 @@ class EnableServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "", 0)
         lib.enable_service(self.mock_runner, self.service, instance="test")
         self.mock_runner.run.assert_called_once_with(
-            ["chkconfig", self.service, "on"]
+            [_chkconfig, self.service, "on"]
         )
 
 
@@ -1202,7 +1207,7 @@ class StartServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "", 0)
         lib.start_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "start", self.service + ".service"]
+            [_systemctl, "start", self.service + ".service"]
         )
 
     def test_systemctl_failed(self, mock_systemctl):
@@ -1213,7 +1218,7 @@ class StartServiceTest(TestCase):
             lambda: lib.start_service(self.mock_runner, self.service)
         )
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "start", self.service + ".service"]
+            [_systemctl, "start", self.service + ".service"]
         )
 
     def test_not_systemctl(self, mock_systemctl):
@@ -1221,7 +1226,7 @@ class StartServiceTest(TestCase):
         self.mock_runner.run.return_value = ("Starting...", "", 0)
         lib.start_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
-            ["service", self.service, "start"]
+            [_service, self.service, "start"]
         )
 
     def test_not_systemctl_failed(self, mock_systemctl):
@@ -1232,7 +1237,7 @@ class StartServiceTest(TestCase):
             lambda: lib.start_service(self.mock_runner, self.service)
         )
         self.mock_runner.run.assert_called_once_with(
-            ["service", self.service, "start"]
+            [_service, self.service, "start"]
         )
 
     def test_instance_systemctl(self, mock_systemctl):
@@ -1240,7 +1245,7 @@ class StartServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "", 0)
         lib.start_service(self.mock_runner, self.service, instance="test")
         self.mock_runner.run.assert_called_once_with([
-            "systemctl", "start", "{0}@{1}.service".format(self.service, "test")
+            _systemctl, "start", "{0}@{1}.service".format(self.service, "test")
         ])
 
     def test_instance_not_systemctl(self, mock_systemctl):
@@ -1248,7 +1253,7 @@ class StartServiceTest(TestCase):
         self.mock_runner.run.return_value = ("Starting...", "", 0)
         lib.start_service(self.mock_runner, self.service, instance="test")
         self.mock_runner.run.assert_called_once_with(
-            ["service", self.service, "start"]
+            [_service, self.service, "start"]
         )
 
 
@@ -1263,7 +1268,7 @@ class StopServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "", 0)
         lib.stop_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "stop", self.service + ".service"]
+            [_systemctl, "stop", self.service + ".service"]
         )
 
     def test_systemctl_failed(self, mock_systemctl):
@@ -1274,7 +1279,7 @@ class StopServiceTest(TestCase):
             lambda: lib.stop_service(self.mock_runner, self.service)
         )
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "stop", self.service + ".service"]
+            [_systemctl, "stop", self.service + ".service"]
         )
 
     def test_not_systemctl(self, mock_systemctl):
@@ -1282,7 +1287,7 @@ class StopServiceTest(TestCase):
         self.mock_runner.run.return_value = ("Stopping...", "", 0)
         lib.stop_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
-            ["service", self.service, "stop"]
+            [_service, self.service, "stop"]
         )
 
     def test_not_systemctl_failed(self, mock_systemctl):
@@ -1293,7 +1298,7 @@ class StopServiceTest(TestCase):
             lambda: lib.stop_service(self.mock_runner, self.service)
         )
         self.mock_runner.run.assert_called_once_with(
-            ["service", self.service, "stop"]
+            [_service, self.service, "stop"]
         )
 
     def test_instance_systemctl(self, mock_systemctl):
@@ -1301,7 +1306,7 @@ class StopServiceTest(TestCase):
         self.mock_runner.run.return_value = ("", "", 0)
         lib.stop_service(self.mock_runner, self.service, instance="test")
         self.mock_runner.run.assert_called_once_with([
-            "systemctl", "stop", "{0}@{1}.service".format(self.service, "test")
+            _systemctl, "stop", "{0}@{1}.service".format(self.service, "test")
         ])
 
     def test_instance_not_systemctl(self, mock_systemctl):
@@ -1309,7 +1314,7 @@ class StopServiceTest(TestCase):
         self.mock_runner.run.return_value = ("Stopping...", "", 0)
         lib.stop_service(self.mock_runner, self.service, instance="test")
         self.mock_runner.run.assert_called_once_with(
-            ["service", self.service, "stop"]
+            [_service, self.service, "stop"]
         )
 
 
@@ -1383,7 +1388,7 @@ class IsServiceEnabledTest(TestCase):
         self.mock_runner.run.return_value = ("enabled\n", "", 0)
         self.assertTrue(lib.is_service_enabled(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "is-enabled", self.service + ".service"]
+            [_systemctl, "is-enabled", self.service + ".service"]
         )
 
     def test_systemctl_disabled(self, mock_systemctl):
@@ -1391,7 +1396,7 @@ class IsServiceEnabledTest(TestCase):
         self.mock_runner.run.return_value = ("disabled\n", "", 2)
         self.assertFalse(lib.is_service_enabled(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "is-enabled", self.service + ".service"]
+            [_systemctl, "is-enabled", self.service + ".service"]
         )
 
     def test_not_systemctl_enabled(self, mock_systemctl):
@@ -1399,7 +1404,7 @@ class IsServiceEnabledTest(TestCase):
         self.mock_runner.run.return_value = ("", "", 0)
         self.assertTrue(lib.is_service_enabled(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
-            ["chkconfig", self.service]
+            [_chkconfig, self.service]
         )
 
     def test_not_systemctl_disabled(self, mock_systemctl):
@@ -1407,7 +1412,7 @@ class IsServiceEnabledTest(TestCase):
         self.mock_runner.run.return_value = ("", "", 3)
         self.assertFalse(lib.is_service_enabled(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
-            ["chkconfig", self.service]
+            [_chkconfig, self.service]
         )
 
 
@@ -1422,7 +1427,7 @@ class IsServiceRunningTest(TestCase):
         self.mock_runner.run.return_value = ("active", "", 0)
         self.assertTrue(lib.is_service_running(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "is-active", self.service + ".service"]
+            [_systemctl, "is-active", self.service + ".service"]
         )
 
     def test_systemctl_not_running(self, mock_systemctl):
@@ -1430,7 +1435,7 @@ class IsServiceRunningTest(TestCase):
         self.mock_runner.run.return_value = ("inactive", "", 2)
         self.assertFalse(lib.is_service_running(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "is-active", self.service + ".service"]
+            [_systemctl, "is-active", self.service + ".service"]
         )
 
     def test_not_systemctl_running(self, mock_systemctl):
@@ -1438,7 +1443,7 @@ class IsServiceRunningTest(TestCase):
         self.mock_runner.run.return_value = ("is running", "", 0)
         self.assertTrue(lib.is_service_running(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
-            ["service", self.service, "status"]
+            [_service, self.service, "status"]
         )
 
     def test_not_systemctl_not_running(self, mock_systemctl):
@@ -1446,7 +1451,7 @@ class IsServiceRunningTest(TestCase):
         self.mock_runner.run.return_value = ("is stopped", "", 3)
         self.assertFalse(lib.is_service_running(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
-            ["service", self.service, "status"]
+            [_service, self.service, "status"]
         )
 
 
@@ -1523,7 +1528,7 @@ pacemaker.service                           enabled
         )
         self.assertEqual(mock_is_systemctl.call_count, 1)
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "list-unit-files", "--full"]
+            [_systemctl, "list-unit-files", "--full"]
         )
 
     def test_failed(self, mock_is_systemctl):
@@ -1532,7 +1537,7 @@ pacemaker.service                           enabled
         self.assertEqual(lib.get_systemd_services(self.mock_runner), [])
         self.assertEqual(mock_is_systemctl.call_count, 1)
         self.mock_runner.run.assert_called_once_with(
-            ["systemctl", "list-unit-files", "--full"]
+            [_systemctl, "list-unit-files", "--full"]
         )
 
     def test_not_systemd(self, mock_is_systemctl):
@@ -1559,14 +1564,14 @@ pacemaker      	0:off	1:off	2:off	3:off	4:off	5:off	6:off
             ["pcsd", "sbd", "pacemaker"]
         )
         self.assertEqual(mock_is_systemctl.call_count, 1)
-        self.mock_runner.run.assert_called_once_with(["chkconfig"])
+        self.mock_runner.run.assert_called_once_with([_chkconfig])
 
     def test_failed(self, mock_is_systemctl):
         mock_is_systemctl.return_value = False
         self.mock_runner.run.return_value = ("stdout", "failed", 1)
         self.assertEqual(lib.get_non_systemd_services(self.mock_runner), [])
         self.assertEqual(mock_is_systemctl.call_count, 1)
-        self.mock_runner.run.assert_called_once_with(["chkconfig"])
+        self.mock_runner.run.assert_called_once_with([_chkconfig])
 
     def test_systemd(self, mock_is_systemctl):
         mock_is_systemctl.return_value = True
@@ -1591,3 +1596,4 @@ class EnsureIsSystemctlTest(TestCase):
                 {}
             )
         )
+
diff --git a/pcs/test/test_lib_resource_agent.py b/pcs/test/test_lib_resource_agent.py
deleted file mode 100644
index a569e66..0000000
--- a/pcs/test/test_lib_resource_agent.py
+++ /dev/null
@@ -1,893 +0,0 @@
-from __future__ import (
-    absolute_import,
-    division,
-    print_function,
-    unicode_literals,
-)
-
-from pcs.test.tools.pcs_unittest import TestCase
-import os.path
-
-from lxml import etree
-
-from pcs.test.tools.assertions import (
-    ExtendedAssertionsMixin,
-    assert_xml_equal,
-)
-from pcs.test.tools.pcs_unittest import mock
-from pcs.test.tools.xml import XmlManipulation as XmlMan
-
-
-from pcs import settings
-from pcs.lib import resource_agent as lib_ra
-from pcs.lib.external import CommandRunner
-
-
-class LibraryResourceTest(TestCase, ExtendedAssertionsMixin):
-    pass
-
-
-class GetParameterTest(LibraryResourceTest):
-    def test_with_all_data(self):
-        xml = """
-            <parameter name="test_param" required="1">
-                <longdesc>
-                    Long description
-                </longdesc>
-                <shortdesc>short description</shortdesc>
-                <content type="test_type" default="default_value" />
-            </parameter>
-        """
-        self.assertEqual(
-            {
-                "name": "test_param",
-                "longdesc": "Long description",
-                "shortdesc": "short description",
-                "type": "test_type",
-                "required": True,
-                "default": "default_value"
-            },
-            lib_ra._get_parameter(etree.XML(xml))
-        )
-
-    def test_minimal_data(self):
-        xml = '<parameter name="test_param" />'
-        self.assertEqual(
-            {
-                "name": "test_param",
-                "longdesc": "",
-                "shortdesc": "",
-                "type": "string",
-                "required": False,
-                "default": None
-            },
-            lib_ra._get_parameter(etree.XML(xml))
-        )
-
-    def test_no_name(self):
-        xml = '<parameter />'
-        self.assertRaises(
-            lib_ra.InvalidMetadataFormat,
-            lambda: lib_ra._get_parameter(etree.XML(xml))
-        )
-
-    def test_invalid_element(self):
-        xml = """
-            <param name="test_param" required="1">
-                <longdesc>
-                    Long description
-                </longdesc>
-                <shortdesc>short description</shortdesc>
-                <content type="test_type" default="default_value" />
-            </param>
-        """
-        self.assertRaises(
-            lib_ra.InvalidMetadataFormat,
-            lambda: lib_ra._get_parameter(etree.XML(xml))
-        )
-
-
-class GetAgentParametersTest(LibraryResourceTest):
-    def test_all_data(self):
-        xml = """
-            <resource-agent>
-                <parameters>
-                    <parameter name="test_param" required="1">
-                        <longdesc>
-                            Long description
-                        </longdesc>
-                        <shortdesc>short description</shortdesc>
-                        <content type="test_type" default="default_value" />
-                    </parameter>
-                    <parameter name="another parameter"/>
-                </parameters>
-            </resource-agent>
-        """
-        self.assertEqual(
-            [
-                {
-                    "name": "test_param",
-                    "longdesc": "Long description",
-                    "shortdesc": "short description",
-                    "type": "test_type",
-                    "required": True,
-                    "default": "default_value"
-                },
-                {
-                    "name": "another parameter",
-                    "longdesc": "",
-                    "shortdesc": "",
-                    "type": "string",
-                    "required": False,
-                    "default": None
-                }
-            ],
-            lib_ra._get_agent_parameters(etree.XML(xml))
-        )
-
-    def test_empty_parameters(self):
-        xml = """
-            <resource-agent>
-                <parameters />
-            </resource-agent>
-        """
-        self.assertEqual(0, len(lib_ra._get_agent_parameters(etree.XML(xml))))
-
-    def test_no_parameters(self):
-        xml = """
-            <resource-agent>
-                <longdesc />
-            </resource-agent>
-        """
-        self.assertEqual(0, len(lib_ra._get_agent_parameters(etree.XML(xml))))
-
-    def test_invalid_format(self):
-        xml = """
-            <resource-agent>
-                <parameters>
-                    <parameter />
-                </parameters>
-            </resource-agent>
-        """
-        self.assertRaises(
-            lib_ra.InvalidMetadataFormat,
-            lambda: lib_ra._get_agent_parameters(etree.XML(xml))
-        )
-
-
-class GetFenceAgentMetadataTest(LibraryResourceTest):
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_invalid_agent_name(self, mock_obj):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_obj.return_value = True
-        agent_name = "agent"
-        self.assert_raises(
-            lib_ra.AgentNotFound,
-            lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
-            {"agent": agent_name}
-        )
-
-        mock_runner.run.assert_not_called()
-
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_relative_path_name(self, mock_obj):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_obj.return_value = True
-        agent_name = "fence_agent/../fence"
-        self.assert_raises(
-            lib_ra.AgentNotFound,
-            lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
-            {"agent": agent_name}
-        )
-
-        mock_runner.run.assert_not_called()
-
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_not_runnable(self, mock_obj):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_obj.return_value = False
-        agent_name = "fence_agent"
-
-        self.assert_raises(
-            lib_ra.AgentNotFound,
-            lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
-            {"agent": agent_name}
-        )
-        mock_runner.run.assert_not_called()
-
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_execution_failed(self, mock_is_runnable):
-        mock_is_runnable.return_value = True
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("", "error", 1)
-        agent_name = "fence_ipmi"
-
-        self.assert_raises(
-            lib_ra.UnableToGetAgentMetadata,
-            lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
-            {"agent": agent_name}
-        )
-
-        script_path = os.path.join(settings.fence_agent_binaries, agent_name)
-        mock_runner.run.assert_called_once_with(
-            [script_path, "-o", "metadata"]
-        )
-
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_invalid_xml(self, mock_is_runnable):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("not xml", "", 0)
-        mock_is_runnable.return_value = True
-        agent_name = "fence_ipmi"
-        self.assert_raises(
-            lib_ra.UnableToGetAgentMetadata,
-            lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
-            {"agent": agent_name}
-        )
-
-        script_path = os.path.join(settings.fence_agent_binaries, agent_name)
-        mock_runner.run.assert_called_once_with(
-            [script_path, "-o", "metadata"]
-        )
-
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_success(self, mock_is_runnable):
-        agent_name = "fence_ipmi"
-        xml = "<xml />"
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (xml, "", 0)
-        mock_is_runnable.return_value = True
-
-        out_dom = lib_ra.get_fence_agent_metadata(mock_runner, agent_name)
-
-        script_path = os.path.join(settings.fence_agent_binaries, agent_name)
-        mock_runner.run.assert_called_once_with(
-            [script_path, "-o", "metadata"]
-        )
-        assert_xml_equal(xml, str(XmlMan(out_dom)))
-
-
-class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_relative_path_provider(self, mock_is_runnable):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_is_runnable.return_value = True
-        provider = "provider/../provider2"
-        agent = "agent"
-
-        self.assert_raises(
-            lib_ra.AgentNotFound,
-            lambda: lib_ra._get_ocf_resource_agent_metadata(
-                mock_runner, provider, agent
-            ),
-            {"agent": "ocf:{0}:{1}".format(provider, agent)}
-        )
-
-        mock_runner.run.assert_not_called()
-
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_relative_path_agent(self, mock_is_runnable):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_is_runnable.return_value = True
-        provider = "provider"
-        agent = "agent/../agent2"
-
-        self.assert_raises(
-            lib_ra.AgentNotFound,
-            lambda: lib_ra._get_ocf_resource_agent_metadata(
-                mock_runner, provider, agent
-            ),
-            {"agent": "ocf:{0}:{1}".format(provider, agent)}
-        )
-
-        mock_runner.run.assert_not_called()
-
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_not_runnable(self, mock_is_runnable):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_is_runnable.return_value = False
-        provider = "provider"
-        agent = "agent"
-
-        self.assert_raises(
-            lib_ra.AgentNotFound,
-            lambda: lib_ra._get_ocf_resource_agent_metadata(
-                mock_runner, provider, agent
-            ),
-            {"agent": "ocf:{0}:{1}".format(provider, agent)}
-        )
-
-        mock_runner.run.assert_not_called()
-
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_execution_failed(self, mock_is_runnable):
-        provider = "provider"
-        agent = "agent"
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("", "error", 1)
-        mock_is_runnable.return_value = True
-
-        self.assert_raises(
-            lib_ra.UnableToGetAgentMetadata,
-            lambda: lib_ra._get_ocf_resource_agent_metadata(
-                mock_runner, provider, agent
-            ),
-            {"agent": "ocf:{0}:{1}".format(provider, agent)}
-        )
-
-        script_path = os.path.join(settings.ocf_resources, provider, agent)
-        mock_runner.run.assert_called_once_with(
-            [script_path, "meta-data"],
-            env_extend={"OCF_ROOT": settings.ocf_root}
-        )
-
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_invalid_xml(self, mock_is_runnable):
-        provider = "provider"
-        agent = "agent"
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("not xml", "", 0)
-        mock_is_runnable.return_value = True
-
-        self.assert_raises(
-            lib_ra.UnableToGetAgentMetadata,
-            lambda: lib_ra._get_ocf_resource_agent_metadata(
-                mock_runner, provider, agent
-            ),
-            {"agent": "ocf:{0}:{1}".format(provider, agent)}
-        )
-
-        script_path = os.path.join(settings.ocf_resources, provider, agent)
-        mock_runner.run.assert_called_once_with(
-            [script_path, "meta-data"],
-            env_extend={"OCF_ROOT": settings.ocf_root}
-        )
-
-    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
-    def test_success(self, mock_is_runnable):
-        provider = "provider"
-        agent = "agent"
-        xml = "<xml />"
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (xml, "", 0)
-        mock_is_runnable.return_value = True
-
-        out_dom = lib_ra._get_ocf_resource_agent_metadata(
-            mock_runner, provider, agent
-        )
-
-        script_path = os.path.join(settings.ocf_resources, provider, agent)
-        mock_runner.run.assert_called_once_with(
-            [script_path, "meta-data"],
-            env_extend={"OCF_ROOT": settings.ocf_root}
-        )
-        assert_xml_equal(xml, str(XmlMan(out_dom)))
-
-
-class GetNagiosResourceAgentMetadataTest(LibraryResourceTest):
-    def test_relative_path_name(self):
-        agent = "agent/../agent2"
-        self.assert_raises(
-            lib_ra.AgentNotFound,
-            lambda: lib_ra._get_nagios_resource_agent_metadata(agent),
-            {"agent": "nagios:" + agent}
-        )
-
-    @mock.patch("lxml.etree.parse")
-    def test_file_opening_exception(self, mock_obj):
-        agent = "agent"
-        mock_obj.side_effect = IOError()
-        self.assert_raises(
-            lib_ra.UnableToGetAgentMetadata,
-            lambda: lib_ra._get_nagios_resource_agent_metadata(agent),
-            {"agent": "nagios:" + agent}
-        )
-
-    @mock.patch("lxml.etree.parse")
-    def test_invalid_xml(self, mock_obj):
-        agent = "agent"
-        mock_obj.side_effect = etree.XMLSyntaxError(None, None, None, None)
-        self.assert_raises(
-            lib_ra.UnableToGetAgentMetadata,
-            lambda: lib_ra._get_nagios_resource_agent_metadata(agent),
-            {"agent": "nagios:" + agent}
-        )
-
-    @mock.patch("lxml.etree.parse")
-    def test_success(self, mock_obj):
-        agent = "agent"
-        xml = "<xml />"
-        mock_obj.return_value = etree.ElementTree(etree.XML(xml))
-        out_dom = lib_ra._get_nagios_resource_agent_metadata(agent)
-        metadata_path = os.path.join(
-            settings.nagios_metadata_path, agent + ".xml"
-        )
-
-        mock_obj.assert_called_once_with(metadata_path)
-        assert_xml_equal(xml, str(XmlMan(out_dom)))
-
-
-class GetAgentDescTest(LibraryResourceTest):
-    def test_invalid_metadata_format(self):
-        xml = "<xml />"
-        self.assertRaises(
-            lib_ra.InvalidMetadataFormat,
-            lambda: lib_ra.get_agent_desc(etree.XML(xml))
-        )
-
-    def test_no_desc(self):
-        xml = "<resource-agent />"
-        expected = {
-            "longdesc": "",
-            "shortdesc": ""
-        }
-        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
-
-    def test_shortdesc_attribute(self):
-        xml = '<resource-agent shortdesc="short description" />'
-        expected = {
-            "longdesc": "",
-            "shortdesc": "short description"
-        }
-        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
-
-    def test_shortdesc_element(self):
-        xml = """
-            <resource-agent>
-                <shortdesc>short description</shortdesc>
-            </resource-agent>
-        """
-        expected = {
-            "longdesc": "",
-            "shortdesc": "short description"
-        }
-        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
-
-    def test_longdesc(self):
-        xml = """
-            <resource-agent>
-                <longdesc>long description</longdesc>
-            </resource-agent>
-        """
-        expected = {
-            "longdesc": "long description",
-            "shortdesc": ""
-        }
-        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
-
-    def test_longdesc_and_shortdesc_attribute(self):
-        xml = """
-            <resource-agent shortdesc="short_desc">
-                <longdesc>long description</longdesc>
-            </resource-agent>
-        """
-        expected = {
-            "longdesc": "long description",
-            "shortdesc": "short_desc"
-        }
-        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
-
-    def test_longdesc_and_shortdesc_element(self):
-        xml = """
-            <resource-agent>
-                <shortdesc>short_desc</shortdesc>
-                <longdesc>long description</longdesc>
-            </resource-agent>
-        """
-        expected = {
-            "longdesc": "long description",
-            "shortdesc": "short_desc"
-        }
-        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
-
-
-class FilterFenceAgentParametersTest(LibraryResourceTest):
-    def test_filter(self):
-        params = [
-            {"name": "debug"},
-            {"name": "valid_param"},
-            {"name": "verbose"},
-            {"name": "help"},
-            {"name": "action"},
-            {"name": "another_param"},
-            {"name": "version"},
-        ]
-        self.assertEqual(
-            [
-                {"name": "valid_param"},
-                {
-                    "name": "action",
-                    "required": False,
-                    "shortdesc":
-                        "\nWARNING: specifying 'action' is deprecated and not" +
-                        " necessary with current Pacemaker versions"
-                },
-                {"name": "another_param"}
-            ],
-            lib_ra._filter_fence_agent_parameters(params)
-        )
-
-    def test_action(self):
-        params = [
-            {
-                "name": "action",
-                "required": True,
-                "shortdesc": "Action"
-            }
-        ]
-
-        self.assertEqual(
-            [
-                {
-                    "name": "action",
-                    "required": False,
-                    "shortdesc":
-                        "Action\nWARNING: specifying 'action' is deprecated " +
-                        "and not necessary with current Pacemaker versions"
-                }
-            ],
-            lib_ra._filter_fence_agent_parameters(params)
-        )
-
-
-class GetResourceAgentMetadata(LibraryResourceTest):
-    def test_unsupported_class(self):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        agent = "class:provider:agent"
-        self.assert_raises(
-            lib_ra.UnsupportedResourceAgent,
-            lambda: lib_ra.get_resource_agent_metadata(mock_runner, agent),
-            {"agent": agent}
-        )
-
-        mock_runner.run.assert_not_called()
-
-    def test_ocf_no_provider(self):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        agent = "ocf:agent"
-
-        self.assert_raises(
-            lib_ra.UnsupportedResourceAgent,
-            lambda: lib_ra.get_resource_agent_metadata(mock_runner, agent),
-            {"agent": agent}
-        )
-
-        mock_runner.run.assert_not_called()
-
-    @mock.patch("pcs.lib.resource_agent._get_ocf_resource_agent_metadata")
-    def test_ocf_ok(self, mock_obj):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        agent = "ocf:provider:agent"
-
-        lib_ra.get_resource_agent_metadata(mock_runner, agent)
-
-        mock_obj.assert_called_once_with(mock_runner, "provider", "agent")
-
-    @mock.patch("pcs.lib.resource_agent._get_nagios_resource_agent_metadata")
-    def test_nagios_ok(self, mock_obj):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        agent = "nagios:agent"
-
-        lib_ra.get_resource_agent_metadata(mock_runner, agent)
-
-        mock_obj.assert_called_once_with("agent")
-        mock_runner.run.assert_not_called()
-
-
-class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest):
-    def test_all_advanced(self):
-        xml = """
-            <resource-agent>
-                <parameters>
-                    <parameter name="test_param" required="0">
-                        <longdesc>
-                             Long description
-                        </longdesc>
-                        <shortdesc>
-                             Advanced use only: short description
-                        </shortdesc>
-                        <content type="test_type" default="default_value" />
-                    </parameter>
-                    <parameter name="another parameter"/>
-                </parameters>
-            </resource-agent>
-        """
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (xml, "", 0)
-        self.assertEqual(
-            [
-                {
-                    "name": "test_param",
-                    "longdesc":
-                        "Advanced use only: short description\nLong "
-                        "description",
-                    "shortdesc": "Advanced use only: short description",
-                    "type": "test_type",
-                    "required": False,
-                    "default": "default_value",
-                    "advanced": True
-                },
-                {
-                    "name": "another parameter",
-                    "longdesc": "",
-                    "shortdesc": "",
-                    "type": "string",
-                    "required": False,
-                    "default": None,
-                    "advanced": False
-                }
-            ],
-            lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner)
-        )
-        mock_runner.run.assert_called_once_with(
-            [settings.stonithd_binary, "metadata"]
-        )
-
-    def test_failed_to_get_xml(self):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("", "some error", 1)
-        self.assert_raises(
-            lib_ra.UnableToGetAgentMetadata,
-            lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner),
-            {"agent": "stonithd"}
-        )
-
-        mock_runner.run.assert_called_once_with(
-            [settings.stonithd_binary, "metadata"]
-        )
-
-    def test_invalid_xml(self):
-        mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("invalid XML", "", 0)
-        self.assertRaises(
-            lib_ra.InvalidMetadataFormat,
-            lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner)
-        )
-
-        mock_runner.run.assert_called_once_with(
-            [settings.stonithd_binary, "metadata"]
-        )
-
-
-class GetActionTest(LibraryResourceTest):
-    def test_name_and_params(self):
-        xml = '''
-            <action name="required" param="value" another_param="same_value" />
-        '''
-        self.assertEqual(
-            lib_ra._get_action(etree.XML(xml)),
-            {
-                "name": "required",
-                "another_param": "same_value",
-                "param": "value"
-            }
-        )
-
-    def test_name_only(self):
-        xml = '''
-            <action name="required" />
-        '''
-        self.assertEqual(
-            lib_ra._get_action(etree.XML(xml)), {"name": "required"}
-        )
-
-    def test_empty(self):
-        xml = '<action />'
-        self.assertRaises(
-            lib_ra.InvalidMetadataFormat,
-            lambda: lib_ra._get_action(etree.XML(xml))
-        )
-
-    def test_no_name(self):
-        xml = '<action param="value" another_param="same_value" />'
-        self.assertRaises(
-            lib_ra.InvalidMetadataFormat,
-            lambda: lib_ra._get_action(etree.XML(xml))
-        )
-
-    def test_not_action_element(self):
-        xml = '<actions param="value" another_param="same_value" />'
-        self.assertRaises(
-            lib_ra.InvalidMetadataFormat,
-            lambda: lib_ra._get_action(etree.XML(xml))
-        )
-
-
-class GetAgentActionsTest(LibraryResourceTest):
-    def test_multiple_actions(self):
-        xml = """
-            <resource-agent>
-                <actions>
-                    <action name="on" automatic="0"/>
-                    <action name="off" />
-                    <action name="reboot" />
-                    <action name="status" />
-                </actions>
-            </resource-agent>
-        """
-        self.assertEqual(
-            lib_ra.get_agent_actions(etree.XML(xml)),
-            [
-                {
-                    "name": "on",
-                    "automatic": "0"
-                },
-                {"name": "off"},
-                {"name": "reboot"},
-                {"name": "status"}
-            ]
-        )
-
-    def test_root_is_not_resource_agent(self):
-        xml = """
-            <agent>
-                <actions>
-                    <action name="on" automatic="0"/>
-                    <action name="off" />
-                </actions>
-            </agent>
-        """
-        self.assertRaises(
-            lib_ra.InvalidMetadataFormat,
-            lambda: lib_ra._get_action(etree.XML(xml))
-        )
-
-    def test_action_without_name(self):
-        xml = """
-            <resource-agent>
-                <actions>
-                    <action name="on" automatic="0"/>
-                    <action />
-                    <action name="reboot" />
-                    <action name="status" />
-                </actions>
-            </resource-agent>
-        """
-        self.assertRaises(
-            lib_ra.InvalidMetadataFormat,
-            lambda: lib_ra._get_action(etree.XML(xml))
-        )
-
-    def test_empty_actions(self):
-        xml = """
-            <resource-agent>
-                <actions />
-            </resource-agent>
-        """
-        self.assertEqual(len(lib_ra.get_agent_actions(etree.XML(xml))), 0)
-
-    def test_no_actions(self):
-        xml = "<resource-agent />"
-        self.assertEqual(len(lib_ra.get_agent_actions(etree.XML(xml))), 0)
-
-
-class ValidateResourceInstanceAttributesTest(LibraryResourceTest):
-    def setUp(self):
-        self.xml = etree.XML("<xml />")
-        self.params = [
-            {
-                "name": "test_param",
-                "longdesc": "Long description",
-                "shortdesc": "short description",
-                "type": "string",
-                "required": False,
-                "default": "default_value"
-            },
-            {
-                "name": "required_param",
-                "longdesc": "",
-                "shortdesc": "",
-                "type": "boolean",
-                "required": True,
-                "default": None
-            },
-            {
-                "name": "another parameter",
-                "longdesc": "",
-                "shortdesc": "",
-                "type": "string",
-                "required": True,
-                "default": None
-            }
-        ]
-
-    def test_only_required(self):
-        attrs = ["another parameter", "required_param"]
-        self.assertEqual(
-            lib_ra._validate_instance_attributes(self.params, attrs),
-            ([], [])
-        )
-
-    def test_optional(self):
-        attrs = ["another parameter", "required_param", "test_param"]
-        self.assertEqual(
-            lib_ra._validate_instance_attributes(self.params, attrs),
-            ([], [])
-        )
-
-    def test_bad_attrs(self):
-        attrs = ["another parameter", "required_param", "unknown_param"]
-        self.assertEqual(
-            lib_ra._validate_instance_attributes(self.params, attrs),
-            (["unknown_param"], [])
-        )
-
-    def test_bad_attrs_and_missing_required(self):
-        attrs = ["unknown_param", "test_param"]
-        bad, missing = lib_ra._validate_instance_attributes(self.params, attrs)
-        self.assertEqual(["unknown_param"], bad)
-        self.assertEqual(
-            sorted(["another parameter", "required_param"]),
-            sorted(missing)
-        )
-
-
- at mock.patch("pcs.lib.resource_agent._validate_instance_attributes")
- at mock.patch("pcs.lib.resource_agent.get_fence_agent_parameters")
- at mock.patch("pcs.lib.resource_agent.get_fence_agent_metadata")
- at mock.patch("pcs.lib.resource_agent.get_resource_agent_parameters")
- at mock.patch("pcs.lib.resource_agent.get_resource_agent_metadata")
-class ValidateInstanceAttributesTest(LibraryResourceTest):
-    def setUp(self):
-        self.runner = mock.MagicMock(spec_set=CommandRunner)
-        self.valid_ret_val = (
-            ["test_parm", "another"], ["nothing here", "port"]
-        )
-        self.xml = etree.XML("<xml />")
-        self.instance_attrs = ["param", "another_one"]
-        self.attrs = [
-            {
-                "name": "test_param",
-                "longdesc": "Long description",
-                "shortdesc": "short description",
-                "type": "string",
-                "required": False,
-                "default": "default_value"
-            },
-            {
-                "name": "required_param",
-                "longdesc": "",
-                "shortdesc": "",
-                "type": "boolean",
-                "required": True,
-                "default": None
-            }
-        ]
-
-    def test_resource(
-        self, res_met_mock, res_par_mock, fen_met_mock, fen_par_mock, valid_mock
-    ):
-        agent = "ocf:pacemaker:Dummy"
-        res_met_mock.return_value = self.xml
-        res_par_mock.return_value = self.attrs
-        valid_mock.return_value = self.valid_ret_val
-        self.assertEqual(
-            self.valid_ret_val,
-            lib_ra.validate_instance_attributes(
-                self.runner, self.instance_attrs, agent
-            )
-        )
-        res_met_mock.assert_called_once_with(self.runner, agent)
-        res_par_mock.assert_called_once_with(self.xml)
-        valid_mock.assert_called_once_with(self.attrs, self.instance_attrs)
-        fen_met_mock.assert_not_called()
-        fen_par_mock.assert_not_called()
-
-    def test_fence(
-        self, res_met_mock, res_par_mock, fen_met_mock, fen_par_mock, valid_mock
-    ):
-        agent = "stonith:fence_test"
-        fen_met_mock.return_value = self.xml
-        fen_par_mock.return_value = self.attrs
-        valid_mock.return_value = self.valid_ret_val
-        self.assertEqual(
-            (["test_parm", "another"], ["nothing here"]),
-            lib_ra.validate_instance_attributes(
-                self.runner, self.instance_attrs, agent
-            )
-        )
-        fen_met_mock.assert_called_once_with(self.runner, "fence_test")
-        fen_par_mock.assert_called_once_with(self.runner, self.xml)
-        valid_mock.assert_called_once_with(self.attrs, self.instance_attrs)
-        res_met_mock.assert_not_called()
-        res_par_mock.assert_not_called()
diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
index 9b7b801..106d89b 100644
--- a/pcs/test/test_lib_sbd.py
+++ b/pcs/test/test_lib_sbd.py
@@ -51,11 +51,9 @@ class RunParallelAndRaiseLibErrorOnFailureTest(TestCase):
                 raise LibraryError(
                     ReportItem.error(
                         report_codes.COMMON_ERROR,
-                        "another report"
                     ),
                     ReportItem.info(
                         report_codes.COMMON_INFO,
-                        "just info"
                     )
                 )
 
@@ -701,23 +699,47 @@ class GetSbdConfigTest(TestCase):
         )
 
 
+ at mock.patch("pcs.lib.external.is_systemctl")
+class GetSbdServiceNameTest(TestCase):
+    def test_systemctl(self, mock_is_systemctl):
+        mock_is_systemctl.return_value = True
+        self.assertEqual("sbd", lib_sbd.get_sbd_service_name())
+        mock_is_systemctl.assert_called_once_with()
+
+    def test_not_systemctl(self, mock_is_systemctl):
+        mock_is_systemctl.return_value = False
+        self.assertEqual("sbd_helper", lib_sbd.get_sbd_service_name())
+        mock_is_systemctl.assert_called_once_with()
+
+
+ at mock.patch("pcs.lib.sbd.get_sbd_service_name")
 @mock.patch("pcs.lib.external.is_service_enabled")
 class IsSbdEnabledTest(TestCase):
-    def test_success(self, mock_is_service_enabled):
+    def test_success(self, mock_is_service_enabled, mock_sbd_name):
         mock_obj = mock.MagicMock()
         mock_is_service_enabled.return_value = True
+        mock_sbd_name.return_value = "sbd"
         self.assertTrue(lib_sbd.is_sbd_enabled(mock_obj))
+        mock_is_service_enabled.assert_called_once_with(mock_obj, "sbd")
+        mock_sbd_name.assert_called_once_with()
 
 
+ at mock.patch("pcs.lib.sbd.get_sbd_service_name")
 @mock.patch("pcs.lib.external.is_service_installed")
 class IsSbdInstalledTest(TestCase):
-    def test_installed(self, mock_is_service_installed):
+    def test_installed(self, mock_is_service_installed, mock_sbd_name):
         mock_obj = mock.MagicMock()
         mock_is_service_installed.return_value = True
+        mock_sbd_name.return_value = "sbd"
         self.assertTrue(lib_sbd.is_sbd_installed(mock_obj))
+        mock_is_service_installed.assert_called_once_with(mock_obj, "sbd")
+        mock_sbd_name.assert_called_once_with()
 
-    def test_not_installed(self, mock_is_service_installed):
+    def test_not_installed(self, mock_is_service_installed, mock_sbd_name):
         mock_obj = mock.MagicMock()
         mock_is_service_installed.return_value = False
+        mock_sbd_name.return_value = "sbd"
         self.assertFalse(lib_sbd.is_sbd_installed(mock_obj))
+        mock_is_service_installed.assert_called_once_with(mock_obj, "sbd")
+        mock_sbd_name.assert_called_once_with()
 
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index 556a9c3..6c62676 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -6,19 +6,20 @@ from __future__ import (
 )
 
 import os
-import shutil
 import re
-from pcs.test.tools import pcs_unittest as unittest
+import shutil
 
 from pcs.test.tools.assertions import AssertPcsMixin
 from pcs.test.tools.misc import (
     ac,
     get_test_resource as rc,
+    outdent,
 )
 from pcs.test.tools.pcs_runner import (
     pcs,
     PcsRunner,
 )
+from pcs.test.tools import pcs_unittest as unittest
 
 from pcs import utils
 from pcs import resource
@@ -28,10 +29,98 @@ temp_cib = rc("temp-cib.xml")
 large_cib = rc("cib-large.xml")
 temp_large_cib  = rc("temp-cib-large.xml")
 
-class ResourceTest(unittest.TestCase):
+
+class ResourceDescribeTest(unittest.TestCase, AssertPcsMixin):
+    def setUp(self):
+        self.pcs_runner = PcsRunner(temp_cib)
+        self.description = outdent("""\
+            ocf:pacemaker:HealthCPU - System health CPU usage
+
+            Systhem health agent that measures the CPU idling and updates the #health-cpu attribute.
+
+            Resource options:
+              state: Location to store the resource state in.
+              yellow_limit: Lower (!) limit of idle percentage to switch the health
+                            attribute to yellow. I.e. the #health-cpu will go yellow if the
+                            %idle of the CPU falls below 50%.
+              red_limit: Lower (!) limit of idle percentage to switch the health attribute
+                         to red. I.e. the #health-cpu will go red if the %idle of the CPU
+                         falls below 10%.
+
+            Default operations:
+              start: timeout=10
+              stop: timeout=10
+              monitor: interval=10 start-delay=0 timeout=10
+            """
+        )
+
+
+    def test_success(self):
+        self.assert_pcs_success(
+            "resource describe ocf:pacemaker:HealthCPU",
+            self.description
+        )
+
+
+    def test_success_guess_name(self):
+        self.assert_pcs_success(
+            "resource describe healthcpu",
+            "Assumed agent name 'ocf:pacemaker:HealthCPU' (deduced from"
+                + " 'healthcpu')\n"
+                + self.description
+        )
+
+
+    def test_nonextisting_agent(self):
+        self.assert_pcs_fail(
+            "resource describe ocf:pacemaker:nonexistent",
+            (
+                "Error: Agent 'ocf:pacemaker:nonexistent' is not installed or"
+                " does not provide valid metadata: Metadata query for"
+                " ocf:pacemaker:nonexistent failed: -5\n"
+            )
+        )
+
+
+    def test_nonextisting_agent_guess_name(self):
+        self.assert_pcs_fail(
+            "resource describe nonexistent",
+            (
+                "Error: Unable to find agent 'nonexistent', try specifying"
+                " its full name\n"
+            )
+        )
+
+
+    def test_more_agents_guess_name(self):
+        self.assert_pcs_fail(
+            "resource describe dummy",
+            (
+                "Error: Multiple agents match 'dummy', please specify full"
+                " name: ocf:heartbeat:Dummy, ocf:pacemaker:Dummy\n"
+            )
+        )
+
+
+    def test_not_enough_params(self):
+        self.assert_pcs_fail(
+            "resource describe",
+            stdout_start="\nUsage: pcs resource describe...\n"
+        )
+
+
+    def test_too_many_params(self):
+        self.assert_pcs_fail(
+            "resource describe agent1 agent2",
+            stdout_start="\nUsage: pcs resource describe...\n"
+        )
+
+
+class ResourceTest(unittest.TestCase, AssertPcsMixin):
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
         shutil.copy(large_cib, temp_large_cib)
+        self.pcs_runner = PcsRunner(temp_cib)
 
     # Setups up a cluster with Resources, groups, master/slave resource & clones
     def setupClusterA(self,temp_cib):
@@ -81,21 +170,25 @@ class ResourceTest(unittest.TestCase):
         assert output == ""
 
     def testCaseInsensitive(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops D1 dummy")
+        o,r = pcs(temp_cib, "resource create --no-default-ops D0 dummy")
+        ac(o, "Error: Multiple agents match 'dummy', please specify full name: ocf:heartbeat:Dummy, ocf:pacemaker:Dummy\n")
+        assert r == 1
+
+        o,r = pcs(temp_cib, "resource create --no-default-ops D1 systemhealth")
+        ac(o, "Creating resource 'ocf:pacemaker:SystemHealth'\n")
         assert r == 0
-        ac(o,'')
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D2 DUMMY")
+        o,r = pcs(temp_cib, "resource create --no-default-ops D2 SYSTEMHEALTH")
+        ac(o, "Creating resource 'ocf:pacemaker:SystemHealth'\n")
         assert r == 0
-        ac(o,'')
 
         o,r = pcs(temp_cib, "resource create --no-default-ops D3 ipaddr2 ip=1.1.1.1")
+        ac(o, "Creating resource 'ocf:heartbeat:IPaddr2'\n")
         assert r == 0
-        ac(o,'')
 
         o,r = pcs(temp_cib, "resource create --no-default-ops D4 ipaddr3")
+        ac(o,"Error: Unable to find agent 'ipaddr3', try specifying its full name\n")
         assert r == 1
-        ac(o,"Error: Unable to create resource 'ipaddr3', it is not installed on this system (use --force to override)\n")
 
     def testEmpty(self):
         output, returnVal = pcs(temp_cib, "resource")
@@ -103,75 +196,21 @@ class ResourceTest(unittest.TestCase):
         assert output == "NO resources configured\n", "Bad output"
 
 
-    def testDescribe(self):
-        output, returnVal = pcs(temp_cib, "resource describe bad_resource")
-        assert returnVal == 1
-        assert output == "Error: Unable to find resource: bad_resource\n"
-
-        output, returnVal = pcs(temp_cib, "resource describe ocf:heartbeat:Dummy")
-        ac(output, """\
-ocf:heartbeat:Dummy - Example stateless resource agent
-
-This is a Dummy Resource Agent. It does absolutely nothing except 
-keep track of whether its running or not.
-Its purpose in life is for testing and to serve as a template for RA writers.
-
-NB: Please pay attention to the timeouts specified in the actions
-section below. They should be meaningful for the kind of resource
-the agent manages. They should be the minimum advised timeouts,
-but they shouldn't/cannot cover _all_ possible resource
-instances. So, try to be neither overly generous nor too stingy,
-but moderate. The minimum timeouts should never be below 10 seconds.
-
-Resource options:
-  state: Location to store the resource state in.
-  fake: Fake attribute that can be changed to cause a reload
-
-Default operations:
-  start: timeout=20
-  stop: timeout=20
-  monitor: interval=10 timeout=20
-""")
-        self.assertEqual(0, returnVal)
-
-        output, returnVal = pcs(temp_cib, "resource describe Dummy")
-        ac(output, """\
-ocf:heartbeat:Dummy - Example stateless resource agent
-
-This is a Dummy Resource Agent. It does absolutely nothing except 
-keep track of whether its running or not.
-Its purpose in life is for testing and to serve as a template for RA writers.
-
-NB: Please pay attention to the timeouts specified in the actions
-section below. They should be meaningful for the kind of resource
-the agent manages. They should be the minimum advised timeouts,
-but they shouldn't/cannot cover _all_ possible resource
-instances. So, try to be neither overly generous nor too stingy,
-but moderate. The minimum timeouts should never be below 10 seconds.
-
-Resource options:
-  state: Location to store the resource state in.
-  fake: Fake attribute that can be changed to cause a reload
-
-Default operations:
-  start: timeout=20
-  stop: timeout=20
-  monitor: interval=10 timeout=20
-""")
-        self.assertEqual(0, returnVal)
+    def testAddResourcesLargeCib(self):
+        output, returnVal = pcs(
+            temp_large_cib,
+            "resource create dummy0 ocf:heartbeat:Dummy"
+        )
+        assert returnVal == 0
+        ac(output, '')
 
-        output, returnVal = pcs(temp_cib, "resource describe SystemHealth")
+        output, returnVal = pcs(temp_large_cib, "resource show dummy0")
         assert returnVal == 0
         ac(output, """\
-ocf:pacemaker:SystemHealth - SystemHealth resource agent
-
-This is a SystemHealth Resource Agent.  It is used to monitor
-the health of a system via IPMI.
-
-Default operations:
-  start: timeout=20
-  stop: timeout=20
-  monitor: timeout=20
+ Resource: dummy0 (class=ocf provider=heartbeat type=Dummy)
+  Operations: start interval=0s timeout=20 (dummy0-start-interval-0s)
+              stop interval=0s timeout=20 (dummy0-stop-interval-0s)
+              monitor interval=10 timeout=20 (dummy0-monitor-interval-10)
 """)
 
     def testAddResources(self):
@@ -210,10 +249,6 @@ Default operations:
         assert returnVal == 0
         assert output == ""
 
-        output, returnVal = pcs(temp_large_cib, "resource create dummy0 Dummy")
-        assert returnVal == 0
-        ac(output, '')
-
 # Verify all resource have been added
         output, returnVal = pcs(temp_cib, "resource show")
         assert returnVal == 0
@@ -258,7 +293,7 @@ Default operations:
 
         output, returnVal = pcs(
             temp_cib,
-            "resource create A dummy op interval=10"
+            "resource create A ocf:heartbeat:Dummy op interval=10"
         )
         ac(output, """\
 Error: When using 'op' you must specify an operation name and at least one option
@@ -267,7 +302,7 @@ Error: When using 'op' you must specify an operation name and at least one optio
 
         output, returnVal = pcs(
             temp_cib,
-            "resource create A dummy op interval=10 timeout=5"
+            "resource create A ocf:heartbeat:Dummy op interval=10 timeout=5"
         )
         ac(output, """\
 Error: When using 'op' you must specify an operation name after 'op'
@@ -276,7 +311,7 @@ Error: When using 'op' you must specify an operation name after 'op'
 
         output, returnVal = pcs(
             temp_cib,
-            "resource create A dummy op monitor interval=10 op interval=10 op start timeout=10"
+            "resource create A ocf:heartbeat:Dummy op monitor interval=10 op interval=10 op start timeout=10"
         )
         ac(output, """\
 Error: When using 'op' you must specify an operation name and at least one option
@@ -285,7 +320,7 @@ Error: When using 'op' you must specify an operation name and at least one optio
 
         output, returnVal = pcs(
             temp_cib,
-            "resource create A dummy op monitor"
+            "resource create A ocf:heartbeat:Dummy op monitor"
         )
         ac(output, """\
 Error: When using 'op' you must specify an operation name and at least one option
@@ -294,7 +329,7 @@ Error: When using 'op' you must specify an operation name and at least one optio
 
         output, returnVal = pcs(
             temp_cib,
-            "resource create A dummy op monitor interval=10 op stop op start timeout=10"
+            "resource create A ocf:heartbeat:Dummy op monitor interval=10 op stop op start timeout=10"
         )
         ac(output, """\
 Error: When using 'op' you must specify an operation name and at least one option
@@ -303,7 +338,7 @@ Error: When using 'op' you must specify an operation name and at least one optio
 
         output, returnVal = pcs(
             temp_cib,
-            "resource create A dummy op monitor interval=10 timeout=10 op monitor interval=10 timeout=20"
+            "resource create A ocf:heartbeat:Dummy op monitor interval=10 timeout=10 op monitor interval=10 timeout=20"
         )
         ac(output, """\
 Error: operation monitor with interval 10s already specified for A:
@@ -313,7 +348,7 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
 
         output, returnVal = pcs(
             temp_cib,
-            "resource create A dummy op monitor interval=10 timeout=10 op stop interval=10 timeout=20"
+            "resource create A ocf:heartbeat:Dummy op monitor interval=10 timeout=10 op stop interval=10 timeout=20"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -330,24 +365,37 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
     def testAddBadResources(self):
         line = "resource create --no-default-ops bad_resource idontexist test=bad"
         output, returnVal = pcs(temp_cib, line)
+        assert output == "Error: Unable to find agent 'idontexist', try specifying its full name\n",[output]
         assert returnVal == 1
-        assert output == "Error: Unable to create resource 'idontexist', it is not installed on this system (use --force to override)\n",[output]
 
         line = "resource create --no-default-ops bad_resource2 idontexist2 test4=bad3 --force"
         output, returnVal = pcs(temp_cib, line)
+        ac(output, "Error: Unable to find agent 'idontexist2', try specifying its full name\n")
+        assert returnVal == 1
+
+        line = "resource create --no-default-ops bad_resource3 ocf:pacemaker:idontexist3 test=bad"
+        output, returnVal = pcs(temp_cib, line)
+        assert output == "Error: Unable to create resource 'ocf:pacemaker:idontexist3', it is not installed on this system (use --force to override)\n",[output]
+        assert returnVal == 1
+
+        line = "resource create --no-default-ops bad_resource4 ocf:pacemaker:idontexist4 test4=bad3 --force"
+        output, returnVal = pcs(temp_cib, line)
+        ac(output, "Warning: 'ocf:pacemaker:idontexist4' is not installed or does not provide valid metadata\n")
         assert returnVal == 0
-        assert output == ""
 
         line = "resource show --full"
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0
         ac(output, """\
- Resource: bad_resource2 (class=ocf provider=heartbeat type=idontexist2)
+ Resource: bad_resource4 (class=ocf provider=pacemaker type=idontexist4)
   Attributes: test4=bad3
-  Operations: monitor interval=60s (bad_resource2-monitor-interval-60s)
+  Operations: monitor interval=60s (bad_resource4-monitor-interval-60s)
 """)
 
-        output, returnVal = pcs(temp_cib, "resource create dum:my Dummy")
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create dum:my ocf:heartbeat:Dummy"
+        )
         assert returnVal == 1
         ac(output, "Error: invalid resource name 'dum:my', ':' is not a valid character for a resource name\n")
 
@@ -481,7 +529,7 @@ Error: moni=tor does not appear to be a valid operation action
               monitor interval=31s (ClusterIP-monitor-interval-31s)
 """)
 
-        o, r = pcs(temp_cib, "resource create --no-default-ops OPTest Dummy op monitor interval=30s OCF_CHECK_LEVEL=1 op monitor interval=25s OCF_CHECK_LEVEL=1")
+        o, r = pcs(temp_cib, "resource create --no-default-ops OPTest ocf:heartbeat:Dummy op monitor interval=30s OCF_CHECK_LEVEL=1 op monitor interval=25s OCF_CHECK_LEVEL=1")
         ac(o,"")
         assert r == 0
 
@@ -489,7 +537,7 @@ Error: moni=tor does not appear to be a valid operation action
         ac(o," Resource: OPTest (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=30s OCF_CHECK_LEVEL=1 (OPTest-monitor-interval-30s)\n              monitor interval=25s OCF_CHECK_LEVEL=1 (OPTest-monitor-interval-25s)\n")
         assert r == 0
 
-        o, r = pcs(temp_cib, "resource create --no-default-ops OPTest2 Dummy op monitor interval=30s OCF_CHECK_LEVEL=1 op monitor interval=25s OCF_CHECK_LEVEL=2 op start timeout=30s")
+        o, r = pcs(temp_cib, "resource create --no-default-ops OPTest2 ocf:heartbeat:Dummy op monitor interval=30s OCF_CHECK_LEVEL=1 op monitor interval=25s OCF_CHECK_LEVEL=2 op start timeout=30s")
         ac(o,"")
         assert r == 0
 
@@ -517,7 +565,7 @@ start interval=0s timeout=30s (OPTest2-start-interval-0s)
         ac(o," Resource: OPTest2 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=30s OCF_CHECK_LEVEL=1 (OPTest2-monitor-interval-30s)\n              monitor interval=25s OCF_CHECK_LEVEL=2 (OPTest2-monitor-interval-25s)\n              start interval=0s timeout=30s (OPTest2-start-interval-0s)\n              monitor interval=60s timeout=1800s (OPTest2-monitor-interval-60s)\n")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops OPTest3 Dummy op monitor OCF_CHECK_LEVEL=1")
+        o,r = pcs(temp_cib, "resource create --no-default-ops OPTest3 ocf:heartbeat:Dummy op monitor OCF_CHECK_LEVEL=1")
         ac(o,"")
         assert r == 0
 
@@ -525,7 +573,7 @@ start interval=0s timeout=30s (OPTest2-start-interval-0s)
         ac(o," Resource: OPTest3 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest3-monitor-interval-60s)\n")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops OPTest4 Dummy op monitor interval=30s")
+        o,r = pcs(temp_cib, "resource create --no-default-ops OPTest4 ocf:heartbeat:Dummy op monitor interval=30s")
         ac(o,"")
         assert r == 0
 
@@ -537,7 +585,7 @@ start interval=0s timeout=30s (OPTest2-start-interval-0s)
         ac(o," Resource: OPTest4 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest4-monitor-interval-60s)\n")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops OPTest5 Dummy")
+        o,r = pcs(temp_cib, "resource create --no-default-ops OPTest5 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
 
@@ -549,7 +597,7 @@ start interval=0s timeout=30s (OPTest2-start-interval-0s)
         ac(o," Resource: OPTest5 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest5-monitor-interval-60s)\n")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops OPTest6 Dummy")
+        o,r = pcs(temp_cib, "resource create --no-default-ops OPTest6 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
 
@@ -561,7 +609,7 @@ start interval=0s timeout=30s (OPTest2-start-interval-0s)
         ac(o," Resource: OPTest6 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (OPTest6-monitor-interval-60s)\n              monitor interval=30s OCF_CHECK_LEVEL=1 (OPTest6-monitor-interval-30s)\n")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops OPTest7 Dummy")
+        o,r = pcs(temp_cib, "resource create --no-default-ops OPTest7 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
 
@@ -591,7 +639,7 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
 """)
         assert r == 1
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops OCFTest1 Dummy")
+        o,r = pcs(temp_cib, "resource create --no-default-ops OCFTest1 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
 
@@ -811,7 +859,7 @@ monitor interval=60s (state-monitor-interval-60s)
 
         output, returnVal = pcs(
             temp_cib,
-            "resource create A dummy op monitor interval=10 op monitor interval=20"
+            "resource create A ocf:heartbeat:Dummy op monitor interval=10 op monitor interval=20"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -855,7 +903,7 @@ monitor interval=20 (A-monitor-interval-20)
 
         output, returnVal = pcs(
             temp_cib,
-            "resource create B dummy --no-default-ops"
+            "resource create B ocf:heartbeat:Dummy --no-default-ops"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -979,11 +1027,11 @@ monitor interval=20 (A-monitor-interval-20)
         self.assertEqual(0, returnVal)
 
     def testGroupDeleteTest(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops A1 Dummy --group AGroup")
+        o,r = pcs(temp_cib, "resource create --no-default-ops A1 ocf:heartbeat:Dummy --group AGroup")
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A2 Dummy --group AGroup")
+        o,r = pcs(temp_cib, "resource create --no-default-ops A2 ocf:heartbeat:Dummy --group AGroup")
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A3 Dummy --group AGroup")
+        o,r = pcs(temp_cib, "resource create --no-default-ops A3 ocf:heartbeat:Dummy --group AGroup")
         assert r == 0
 
         o,r = pcs(temp_cib, "resource show")
@@ -1017,15 +1065,30 @@ monitor interval=20 (A-monitor-interval-20)
         assert returnVal == 0
         assert output =="Removing Constraint - location-ClusterIP3-rh7-1-INFINITY\nDeleting Resource (and group) - ClusterIP3\n"
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops A1 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A1 ocf:heartbeat:Dummy"
+        )
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A2 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A2 ocf:heartbeat:Dummy"
+        )
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A3 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A3 ocf:heartbeat:Dummy"
+        )
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A4 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A4 ocf:heartbeat:Dummy"
+        )
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A5 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A5 ocf:heartbeat:Dummy"
+        )
         assert r == 0
 
         o,r = pcs(temp_cib, "resource group add AGroup A1 A2 A3 A4 A5")
@@ -1092,21 +1155,45 @@ Ticket Constraints:
         ac(o,' Resource: A1 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (A1-monitor-interval-60s)\n Resource: A2 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (A2-monitor-interval-60s)\n Resource: A3 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (A3-monitor-interval-60s)\n Resource: A4 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (A4-monitor-interval-60s)\n R [...]
 
     def testGroupAdd(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops A1 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A1 ocf:heartbeat:Dummy"
+        )
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A2 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A2 ocf:heartbeat:Dummy"
+        )
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A3 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A3 ocf:heartbeat:Dummy"
+        )
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A4 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A4 ocf:heartbeat:Dummy"
+        )
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A5 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A5 ocf:heartbeat:Dummy"
+        )
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A6 Dummy --group")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A6 ocf:heartbeat:Dummy --group"
+        )
         assert r == 1
-        o,r = pcs(temp_cib, "resource create --no-default-ops A6 Dummy --group Dgroup")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A6 ocf:heartbeat:Dummy --group Dgroup"
+        )
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A7 Dummy --group Dgroup")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A7 ocf:heartbeat:Dummy --group Dgroup"
+        )
         assert r == 0
 
         o,r = pcs(temp_cib, "resource group add MyGroup A1 B1")
@@ -1156,10 +1243,16 @@ Ticket Constraints:
      A5\t(ocf::heartbeat:Dummy):\tStopped
 """)
 
-        o, r = pcs(temp_cib, "resource create --no-default-ops A6 Dummy")
+        o, r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A6 ocf:heartbeat:Dummy"
+        )
         self.assertEqual(0, r)
 
-        o, r = pcs(temp_cib, "resource create --no-default-ops A7 Dummy")
+        o, r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A7 ocf:heartbeat:Dummy"
+        )
         self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource group add MyGroup A6 --after A1")
@@ -1259,14 +1352,14 @@ Ticket Constraints:
 
         o,r = pcs(
             temp_cib,
-            "resource create --no-default-ops A8 Dummy --group MyGroup --before A1"
+            "resource create --no-default-ops A8 ocf:heartbeat:Dummy --group MyGroup --before A1"
         )
         ac(o, "")
         self.assertEqual(0, r)
 
         o,r = pcs(
             temp_cib,
-            "resource create --no-default-ops A9 Dummy --group MyGroup --after A1"
+            "resource create --no-default-ops A9 ocf:heartbeat:Dummy --group MyGroup --after A1"
         )
         ac(o, "")
         self.assertEqual(0, r)
@@ -1323,7 +1416,10 @@ Ticket Constraints:
         ac(o, "Error: 'A7' is already a resource\n")
         self.assertEqual(1, r)
 
-        o, r = pcs(temp_cib, "resource create --no-default-ops A0 Dummy --clone")
+        o, r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A0 ocf:heartbeat:Dummy --clone"
+        )
         self.assertEqual(0, r)
         ac(o, "")
 
@@ -1367,19 +1463,55 @@ Deleting Resource (and group) - dummylarge
         assert returnVal == 0
 
     def testGroupOrder(self):
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops A Dummy")
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops B Dummy")
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops C Dummy")
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops D Dummy")
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops E Dummy")
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops F Dummy")
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops G Dummy")
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops H Dummy")
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops I Dummy")
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops J Dummy")
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops K Dummy")
-
-        output, returnVal = pcs(temp_cib, "resource group add RGA A B C E D K J I")
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops A ocf:heartbeat:Dummy"
+        )
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops B ocf:heartbeat:Dummy"
+        )
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops C ocf:heartbeat:Dummy"
+        )
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops D ocf:heartbeat:Dummy"
+        )
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops E ocf:heartbeat:Dummy"
+        )
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops F ocf:heartbeat:Dummy"
+        )
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops G ocf:heartbeat:Dummy"
+        )
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops H ocf:heartbeat:Dummy"
+        )
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops I ocf:heartbeat:Dummy"
+        )
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops J ocf:heartbeat:Dummy"
+        )
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops K ocf:heartbeat:Dummy"
+        )
+
+        output, returnVal = pcs(
+            temp_cib,
+            "resource group add RGA A B C E D K J I"
+        )
         assert returnVal == 0
         assert output == "",output
 
@@ -1406,13 +1538,15 @@ Deleting Resource (and group) - dummylarge
 
     def testRemoveLastResourceFromGroup(self):
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops d1 Dummy --group gr1"
+            temp_cib,
+            "resource create --no-default-ops d1 ocf:heartbeat:Dummy --group gr1"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops d2 Dummy --group gr2"
+            temp_cib,
+            "resource create --no-default-ops d2 ocf:heartbeat:Dummy --group gr2"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -1440,13 +1574,15 @@ Deleting Resource (and group) - dummylarge
 
     def testRemoveLastResourceFromClonedGroup(self):
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops d1 Dummy --group gr1"
+            temp_cib,
+            "resource create --no-default-ops d1 ocf:heartbeat:Dummy --group gr1"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops d2 Dummy --group gr2"
+            temp_cib,
+            "resource create --no-default-ops d2 ocf:heartbeat:Dummy --group gr2"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -1477,13 +1613,15 @@ Deleting Resource (and group) - dummylarge
 
     def testRemoveLastResourceFromMasteredGroup(self):
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops d1 Dummy --group gr1"
+            temp_cib,
+            "resource create --no-default-ops d1 ocf:heartbeat:Dummy --group gr1"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops d2 Dummy --group gr2"
+            temp_cib,
+            "resource create --no-default-ops d2 ocf:heartbeat:Dummy --group gr2"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -1493,11 +1631,12 @@ Deleting Resource (and group) - dummylarge
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show")
-        ac(output, """\
- Resource Group: gr1
-     d1\t(ocf::heartbeat:Dummy):\tStopped
- Master/Slave Set: gr2-master [gr2]
-""")
+        ac(output, outdent("""\
+             Resource Group: gr1
+                 d1\t(ocf::heartbeat:Dummy):\tStopped
+             Master/Slave Set: gr2-master [gr2]
+            """
+        ))
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource group add gr1 d2")
@@ -1505,72 +1644,74 @@ Deleting Resource (and group) - dummylarge
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show")
-        ac(output, """\
- Resource Group: gr1
-     d1\t(ocf::heartbeat:Dummy):\tStopped
-     d2\t(ocf::heartbeat:Dummy):\tStopped
-""")
+        ac(output, outdent("""\
+             Resource Group: gr1
+                 d1\t(ocf::heartbeat:Dummy):\tStopped
+                 d2\t(ocf::heartbeat:Dummy):\tStopped
+            """
+        ))
         self.assertEqual(0, returnVal)
 
     def testClusterConfig(self):
         self.setupClusterA(temp_cib)
 
-        output, returnVal = pcs(temp_cib, "config")
-        assert returnVal == 0
-        ac(output, """\
-Cluster Name: test99
-Corosync Nodes:
- rh7-1 rh7-2
-Pacemaker Nodes:
-
-Resources:
- Resource: ClusterIP6 (class=ocf provider=heartbeat type=IPaddr2)
-  Attributes: ip=192.168.0.99 cidr_netmask=32
-  Operations: monitor interval=30s (ClusterIP6-monitor-interval-30s)
- Group: TestGroup1
-  Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
-   Attributes: ip=192.168.0.99 cidr_netmask=32
-   Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
- Group: TestGroup2
-  Resource: ClusterIP2 (class=ocf provider=heartbeat type=IPaddr2)
-   Attributes: ip=192.168.0.99 cidr_netmask=32
-   Operations: monitor interval=30s (ClusterIP2-monitor-interval-30s)
-  Resource: ClusterIP3 (class=ocf provider=heartbeat type=IPaddr2)
-   Attributes: ip=192.168.0.99 cidr_netmask=32
-   Operations: monitor interval=30s (ClusterIP3-monitor-interval-30s)
- Clone: ClusterIP4-clone
-  Resource: ClusterIP4 (class=ocf provider=heartbeat type=IPaddr2)
-   Attributes: ip=192.168.0.99 cidr_netmask=32
-   Operations: monitor interval=30s (ClusterIP4-monitor-interval-30s)
- Master: Master
-  Resource: ClusterIP5 (class=ocf provider=heartbeat type=IPaddr2)
-   Attributes: ip=192.168.0.99 cidr_netmask=32
-   Operations: monitor interval=30s (ClusterIP5-monitor-interval-30s)
-
-Stonith Devices:
-Fencing Levels:
-
-Location Constraints:
-Ordering Constraints:
-Colocation Constraints:
-Ticket Constraints:
-
-Alerts:
- No alerts defined
-
-Resources Defaults:
- No defaults set
-Operations Defaults:
- No defaults set
-
-Cluster Properties:
-
-Quorum:
-  Options:
-""")
+        self.assert_pcs_success("config",outdent("""\
+            Cluster Name: test99
+            Corosync Nodes:
+             rh7-1 rh7-2
+            Pacemaker Nodes:
+
+            Resources:
+             Resource: ClusterIP6 (class=ocf provider=heartbeat type=IPaddr2)
+              Attributes: ip=192.168.0.99 cidr_netmask=32
+              Operations: monitor interval=30s (ClusterIP6-monitor-interval-30s)
+             Group: TestGroup1
+              Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+               Attributes: ip=192.168.0.99 cidr_netmask=32
+               Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
+             Group: TestGroup2
+              Resource: ClusterIP2 (class=ocf provider=heartbeat type=IPaddr2)
+               Attributes: ip=192.168.0.99 cidr_netmask=32
+               Operations: monitor interval=30s (ClusterIP2-monitor-interval-30s)
+              Resource: ClusterIP3 (class=ocf provider=heartbeat type=IPaddr2)
+               Attributes: ip=192.168.0.99 cidr_netmask=32
+               Operations: monitor interval=30s (ClusterIP3-monitor-interval-30s)
+             Clone: ClusterIP4-clone
+              Resource: ClusterIP4 (class=ocf provider=heartbeat type=IPaddr2)
+               Attributes: ip=192.168.0.99 cidr_netmask=32
+               Operations: monitor interval=30s (ClusterIP4-monitor-interval-30s)
+             Master: Master
+              Resource: ClusterIP5 (class=ocf provider=heartbeat type=IPaddr2)
+               Attributes: ip=192.168.0.99 cidr_netmask=32
+               Operations: monitor interval=30s (ClusterIP5-monitor-interval-30s)
+
+            Stonith Devices:
+            Fencing Levels:
+
+            Location Constraints:
+            Ordering Constraints:
+            Colocation Constraints:
+            Ticket Constraints:
+
+            Alerts:
+             No alerts defined
+
+            Resources Defaults:
+             No defaults set
+            Operations Defaults:
+             No defaults set
+
+            Cluster Properties:
+
+            Quorum:
+              Options:
+            """
+        ))
 
     def testCloneRemove(self):
-        o,r = pcs("resource create --no-default-ops D1 Dummy --clone")
+        o,r = pcs(
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy --clone"
+        )
         ac(o,"")
         assert r == 0
 
@@ -1598,7 +1739,9 @@ Deleting Resource - D1
         assert r == 0
         ac(o,"")
 
-        o, r = pcs("resource create d99 Dummy clone globally-unique=true")
+        o, r = pcs(
+            "resource create d99 ocf:heartbeat:Dummy clone globally-unique=true"
+        )
         ac(o, "")
         assert r == 0
 
@@ -1651,7 +1794,10 @@ Removing Constraint - location-ClusterIP5-rh7-1-INFINITY
 Deleting Resource - ClusterIP5
 """)
 
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops ClusterIP5 Dummy")
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops ClusterIP5 ocf:heartbeat:Dummy"
+        )
         assert returnVal == 0
         assert output == ""
 
@@ -1679,9 +1825,7 @@ Deleting Resource - ClusterIP5
         assert returnVal == 0
         assert output == ""
 
-        output, returnVal = pcs(temp_cib, "config")
-        assert returnVal == 0
-        ac(output, """\
+        self.assert_pcs_success("config","""\
 Cluster Name: test99
 Corosync Nodes:
  rh7-1 rh7-2
@@ -1763,11 +1907,20 @@ Deleting Resource (and group and M/S) - dummylarge
         assert returnVal == 0
 
     def testResourceManage(self):
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops D0 Dummy")
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops D0 ocf:heartbeat:Dummy"
+        )
         assert returnVal == 0
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops D1 Dummy")
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy"
+        )
         assert returnVal == 0
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops D2 Dummy")
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops D2 ocf:heartbeat:Dummy"
+        )
         assert returnVal == 0
         output, returnVal = pcs(temp_cib, "resource group add DGroup D0")
         assert returnVal == 0
@@ -1787,19 +1940,31 @@ Deleting Resource (and group and M/S) - dummylarge
         assert returnVal == 0
         assert output == ""
 
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops C1Master Dummy --master")
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops C1Master ocf:heartbeat:Dummy --master"
+        )
         assert returnVal == 0
         assert output == ""
 
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops C2Master Dummy --master")
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops C2Master ocf:heartbeat:Dummy --master"
+        )
         assert returnVal == 0
         assert output == ""
 
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops C3Master Dummy --clone")
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops C3Master ocf:heartbeat:Dummy --clone"
+        )
         assert returnVal == 0
         assert output == ""
 
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops C4Master Dummy clone")
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops C4Master ocf:heartbeat:Dummy clone"
+        )
         assert returnVal == 0
         assert output == ""
 
@@ -1866,7 +2031,7 @@ Deleting Resource (and group and M/S) - dummylarge
 
     def testCloneMasterManage(self):
 # is-managed on the primitive, attempting manage on primitive
-        output, returnVal = pcs(temp_cib, "resource create clone-unmanage Dummy --clone")
+        output, returnVal = pcs(temp_cib, "resource create clone-unmanage ocf:heartbeat:Dummy --clone")
         assert returnVal == 0
         ac (output,'')
 
@@ -1888,7 +2053,7 @@ Deleting Resource (and group and M/S) - dummylarge
         output, returnVal = pcs(temp_cib, "resource delete clone-unmanage")
 
 # is-managed on the clone, attempting manage on primitive
-        output, returnVal = pcs(temp_cib, "resource create clone-unmanage Dummy --clone")
+        output, returnVal = pcs(temp_cib, "resource create clone-unmanage ocf:heartbeat:Dummy --clone")
         ac (output,'')
         assert returnVal == 0
 
@@ -1910,7 +2075,7 @@ Deleting Resource (and group and M/S) - dummylarge
         pcs(temp_cib, "resource delete clone-unmanage")
 
 # is-managed on the primitive, attempting manage on clone
-        output, returnVal = pcs(temp_cib, "resource create clone-unmanage Dummy --clone")
+        output, returnVal = pcs(temp_cib, "resource create clone-unmanage ocf:heartbeat:Dummy --clone")
         assert returnVal == 0
         ac (output,'')
 
@@ -1932,7 +2097,7 @@ Deleting Resource (and group and M/S) - dummylarge
         pcs(temp_cib, "resource delete clone-unmanage")
 
 # is-managed on the clone, attempting manage on clone
-        output, returnVal = pcs(temp_cib, "resource create clone-unmanage Dummy --clone")
+        output, returnVal = pcs(temp_cib, "resource create clone-unmanage ocf:heartbeat:Dummy --clone")
         assert returnVal == 0
         ac (output,'')
 
@@ -1973,11 +2138,11 @@ Deleting Resource (and group and M/S) - dummylarge
         ac (output, ' Master: master-unmanage-master\n  Resource: master-unmanage (class=ocf provider=pacemaker type=Stateful)\n   Operations: monitor interval=60s (master-unmanage-monitor-interval-60s)\n')
 
     def testGroupManage(self):
-        o, r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --group AG")
+        o, r = pcs(temp_cib, "resource create --no-default-ops D1 ocf:heartbeat:Dummy --group AG")
         self.assertEqual(r, 0)
         ac(o,"")
 
-        o, r = pcs(temp_cib, "resource create --no-default-ops D2 Dummy --group AG")
+        o, r = pcs(temp_cib, "resource create --no-default-ops D2 ocf:heartbeat:Dummy --group AG")
         self.assertEqual(r, 0)
         ac(o,"")
 
@@ -2050,7 +2215,7 @@ Deleting Resource (and group and M/S) - dummylarge
         ac(o," Group: AG\n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n  Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
 
     def testMasterMetaCreate(self):
-        o,r = pcs('resource create --no-default-ops F0 Dummy op monitor interval=10s role=Master op monitor interval=20s role=Slave --master meta notify=true')
+        o,r = pcs('resource create --no-default-ops F0 ocf:heartbeat:Dummy op monitor interval=10s role=Master op monitor interval=20s role=Slave --master meta notify=true')
         ac (o,"")
         assert r==0
 
@@ -2059,11 +2224,11 @@ Deleting Resource (and group and M/S) - dummylarge
         assert r==0
 
     def testBadInstanceVariables(self):
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops D0 Dummy test=testC test2=test2a op monitor interval=35 meta test7=test7a test6=")
+        output, returnVal = pcs(temp_cib, "resource create --no-default-ops D0 ocf:heartbeat:Dummy test=testC test2=test2a op monitor interval=35 meta test7=test7a test6=")
         assert returnVal == 1
         assert output == "Error: resource option(s): 'test, test2', are not recognized for resource type: 'ocf:heartbeat:Dummy' (use --force to override)\n", [output]
 
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops --force D0 Dummy test=testC test2=test2a test4=test4A op monitor interval=35 meta test7=test7a test6=")
+        output, returnVal = pcs(temp_cib, "resource create --no-default-ops --force D0 ocf:heartbeat:Dummy test=testC test2=test2a test4=test4A op monitor interval=35 meta test7=test7a test6=")
         assert returnVal == 0
         assert output == "", [output]
 
@@ -2085,11 +2250,11 @@ Deleting Resource (and group and M/S) - dummylarge
         assert returnVal == 0
 
     def testMetaAttrs(self):
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops --force D0 Dummy test=testA test2=test2a op monitor interval=30 meta test5=test5a test6=test6a")
+        output, returnVal = pcs(temp_cib, "resource create --no-default-ops --force D0 ocf:heartbeat:Dummy test=testA test2=test2a op monitor interval=30 meta test5=test5a test6=test6a")
         assert returnVal == 0
         assert output == "", [output]
 
-        output, returnVal = pcs(temp_cib, "resource create --no-default-ops --force D1 Dummy test=testA test2=test2a op monitor interval=30")
+        output, returnVal = pcs(temp_cib, "resource create --no-default-ops --force D1 ocf:heartbeat:Dummy test=testA test2=test2a op monitor interval=30")
         assert returnVal == 0
         assert output == "", [output]
 
@@ -2125,11 +2290,17 @@ Deleting Resource (and group and M/S) - dummylarge
         assert returnVal == 0
 
     def testMSGroup(self):
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D0 Dummy")
+        output, returnVal  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D0 ocf:heartbeat:Dummy"
+        )
         assert returnVal == 0
         assert output == "", [output]
 
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D1 Dummy")
+        output, returnVal  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy"
+        )
         assert returnVal == 0
         assert output == "", [output]
 
@@ -2154,7 +2325,10 @@ Deleting Resource (and group and M/S) - dummylarge
         assert output == 'Deleting Resource (and group and M/S) - D1\n', [output]
 
     def testUncloneWithConstraints(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops D0 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D0 ocf:pacemaker:Dummy"
+        )
         ac(o,"")
         assert r == 0
 
@@ -2176,13 +2350,15 @@ Deleting Resource (and group and M/S) - dummylarge
 
     def testUnclone(self):
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops dummy1 Dummy"
+            temp_cib,
+            "resource create --no-default-ops dummy1 ocf:heartbeat:Dummy"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops dummy2 Dummy"
+            temp_cib,
+            "resource create --no-default-ops dummy2 ocf:heartbeat:Dummy"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -2528,11 +2704,17 @@ Deleting Resource (and group and M/S) - dummylarge
         self.assertEqual(0, returnVal)
 
     def testCloneGroupMember(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops D0 Dummy --group AG")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D0 ocf:heartbeat:Dummy --group AG"
+        )
         ac(o,"")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --group AG")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy --group AG"
+        )
         ac(o,"")
         assert r == 0
 
@@ -2556,11 +2738,17 @@ Deleting Resource (and group and M/S) - dummylarge
         ac(o," Clone Set: D0-clone [D0]\n Clone Set: D1-clone [D1]\n")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D2 Dummy --group AG2")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D2 ocf:heartbeat:Dummy --group AG2"
+        )
         ac(o,"")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D3 Dummy --group AG2")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D3 ocf:heartbeat:Dummy --group AG2"
+        )
         ac(o,"")
         assert r == 0
 
@@ -2587,15 +2775,15 @@ Deleting Resource (and group and M/S) - dummylarge
         assert r == 0
 
     def testResourceCreationWithGroupOperations(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --group AG2 op monitor interval=32s")
+        o,r = pcs(temp_cib, "resource create --no-default-ops D1 ocf:heartbeat:Dummy --group AG2 op monitor interval=32s")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D3 Dummy op monitor interval=34s --group AG2 ")
+        o,r = pcs(temp_cib, "resource create --no-default-ops D3 ocf:heartbeat:Dummy op monitor interval=34s --group AG2 ")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D4 Dummy op monitor interval=35s --group=AG2 ")
+        o,r = pcs(temp_cib, "resource create --no-default-ops D4 ocf:heartbeat:Dummy op monitor interval=35s --group=AG2 ")
         ac(o,"")
         assert r == 0
 
@@ -2604,13 +2792,22 @@ Deleting Resource (and group and M/S) - dummylarge
         assert r == 0
 
     def testCloneMaster(self):
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D0 Dummy")
+        output, returnVal  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D0 ocf:heartbeat:Dummy"
+        )
         assert returnVal == 0
         assert output == "", [output]
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D1 Dummy")
+        output, returnVal  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy"
+        )
         assert returnVal == 0
         assert output == "", [output]
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D2 Dummy")
+        output, returnVal  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D2 ocf:heartbeat:Dummy"
+        )
         assert returnVal == 0
         assert output == "", [output]
 
@@ -2646,15 +2843,24 @@ Deleting Resource (and group and M/S) - dummylarge
         assert returnVal == 0
         assert output == "Deleting Resource - D2\n", [output]
 
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D0 Dummy")
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D2 Dummy")
+        output, returnVal  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D0 ocf:heartbeat:Dummy"
+        )
+        output, returnVal  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D2 ocf:heartbeat:Dummy"
+        )
 
         output, returnVal = pcs(temp_cib, "resource show --full")
         assert returnVal == 0
         assert output == " Master: D1-master-custom\n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (D0-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (D2-monitor-interval-60s)\n", [output]
 
     def testLSBResource(self):
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D2 lsb:network")
+        output, returnVal  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D2 lsb:network"
+        )
         assert returnVal == 0
         assert output == "", [output]
 
@@ -2676,7 +2882,8 @@ Deleting Resource (and group and M/S) - dummylarge
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops dummy Dummy"
+            temp_cib,
+            "resource create --no-default-ops dummy ocf:heartbeat:Dummy"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -2825,13 +3032,15 @@ Error: when specifying --master you must use the master id
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops D1 Dummy --clone"
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy --clone"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops D2 Dummy --group DG"
+            temp_cib,
+            "resource create --no-default-ops D2 ocf:heartbeat:Dummy --group DG"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -2892,15 +3101,24 @@ Ticket Constraints:
         self.assertEqual(0, returnVal)
 
     def testNoMoveMSClone(self):
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D0 Dummy")
+        output, returnVal  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D0 ocf:heartbeat:Dummy"
+        )
         assert returnVal == 0
         assert output == "", [output]
 
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --clone")
+        output, returnVal  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy --clone"
+        )
         assert returnVal == 0
         assert output == "", [output]
 
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D2 Dummy --master")
+        output, returnVal  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D2 ocf:heartbeat:Dummy --master"
+        )
         assert returnVal == 0
         assert output == "", [output]
 
@@ -2946,19 +3164,19 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert r == 1
 
     def testDebugStartCloneGroup(self):
-        o,r = pcs("resource create D0 Dummy --group DGroup")
+        o,r = pcs("resource create D0 ocf:heartbeat:Dummy --group DGroup")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource create D1 Dummy --group DGroup")
+        o,r = pcs("resource create D1 ocf:heartbeat:Dummy --group DGroup")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource create D2 Dummy --clone")
+        o,r = pcs("resource create D2 ocf:heartbeat:Dummy --clone")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource create D3 Dummy --master")
+        o,r = pcs("resource create D3 ocf:heartbeat:Dummy --master")
         ac(o,"")
         assert r == 0
 
@@ -2975,7 +3193,10 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert r == 1
 
     def testGroupCloneCreation(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --group DGroup")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy --group DGroup"
+        )
         assert r == 0
         assert o == ""
 
@@ -3004,11 +3225,17 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         ac(o,"")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --group DGroup")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy --group DGroup"
+        )
         assert r == 0
         ac(o,"")
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D2 Dummy --group DGroup")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D2 ocf:heartbeat:Dummy --group DGroup"
+        )
         assert r == 0
         ac(o,"")
 
@@ -3041,15 +3268,15 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         ac(o,"NO resources configured\n")
 
     def testResourceCloneCreation(self):
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --clone")
+        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D1 ocf:heartbeat:Dummy --clone")
         assert returnVal == 0
         assert output == "", [output]
 
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D2 Dummy --clone")
+        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D2 ocf:heartbeat:Dummy --clone")
         assert returnVal == 0
         assert output == "", [output]
 
-        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D3 Dummy --clone globaly-unique=true")
+        output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D3 ocf:heartbeat:Dummy --clone globaly-unique=true")
         assert returnVal == 0
         assert output == "", [output]
 
@@ -3069,7 +3296,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert returnVal == 0
         assert output == "Deleting Resource - D3\n", [output]
 
-        output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:controld op monitor interval=10s --clone meta interleave=true clone-node-max=1 ordered=true")
+        output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:Dummy op monitor interval=10s --clone meta interleave=true clone-node-max=1 ordered=true")
         assert output == "", [output]
         assert returnVal == 0
 
@@ -3077,7 +3304,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         ac(output, """\
  Clone: dlm-clone
   Meta Attrs: clone-node-max=1 interleave=true ordered=true 
-  Resource: dlm (class=ocf provider=pacemaker type=controld)
+  Resource: dlm (class=ocf provider=pacemaker type=Dummy)
    Operations: monitor interval=10s (dlm-monitor-interval-10s)
 """)
         self.assertEqual(0, returnVal)
@@ -3086,7 +3313,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert returnVal == 0
         assert output == "Deleting Resource - dlm\n", [output]
 
-        output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:controld op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
+        output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:Dummy op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
         assert output == "", [output]
         assert returnVal == 0
 
@@ -3094,7 +3321,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         ac(output, """\
  Clone: dlm-clone
   Meta Attrs: clone-node-max=1 interleave=true ordered=true 
-  Resource: dlm (class=ocf provider=pacemaker type=controld)
+  Resource: dlm (class=ocf provider=pacemaker type=Dummy)
    Operations: monitor interval=10s (dlm-monitor-interval-10s)
 """)
         self.assertEqual(0, returnVal)
@@ -3103,15 +3330,15 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert returnVal == 0
         assert output == "Deleting Resource - dlm\n", [output]
 
-        output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:controld op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
+        output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:Dummy op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
         assert returnVal == 0
         assert output == "", [output]
 
-        output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:controld op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
+        output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm ocf:pacemaker:Dummy op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
         assert returnVal == 1
         assert output == "Error: unable to create resource/fence device 'dlm', 'dlm' already exists on this system\n", [output]
 
-        output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm-clone ocf:pacemaker:controld op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
+        output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm-clone ocf:pacemaker:Dummy op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
         assert returnVal == 1
         assert output == "Error: unable to create resource/fence device 'dlm-clone', 'dlm-clone' already exists on this system\n", [output]
 
@@ -3119,7 +3346,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         ac(output, """\
  Clone: dlm-clone
   Meta Attrs: clone-node-max=1 interleave=true ordered=true 
-  Resource: dlm (class=ocf provider=pacemaker type=controld)
+  Resource: dlm (class=ocf provider=pacemaker type=Dummy)
    Operations: monitor interval=10s (dlm-monitor-interval-10s)
 """)
         self.assertEqual(0, returnVal)
@@ -3134,13 +3361,15 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
 
     def testResourceCloneId(self):
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops dummy-clone Dummy"
+            temp_cib,
+            "resource create --no-default-ops dummy-clone ocf:heartbeat:Dummy"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops dummy Dummy"
+            temp_cib,
+            "resource create --no-default-ops dummy ocf:heartbeat:Dummy"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -3164,7 +3393,8 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops dummy Dummy --clone"
+            temp_cib,
+            "resource create --no-default-ops dummy ocf:heartbeat:Dummy --clone"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -3181,13 +3411,15 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
 
     def testResourceMasterId(self):
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops dummy-master Dummy"
+            temp_cib,
+            "resource create --no-default-ops dummy-master ocf:heartbeat:Dummy"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops dummy Dummy"
+            temp_cib,
+            "resource create --no-default-ops dummy ocf:heartbeat:Dummy"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -3233,7 +3465,8 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops dummy Dummy --master"
+            temp_cib,
+            "resource create --no-default-ops dummy ocf:heartbeat:Dummy --master"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -3249,7 +3482,10 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         self.assertEqual(0, returnVal)
 
     def testResourceCloneUpdate(self):
-        o, r  = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --clone")
+        o, r  = pcs(
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy --clone"
+        )
         assert r == 0
         ac(o, "")
 
@@ -3282,10 +3518,16 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         ac(o, ' Clone: D1-clone\n  Meta Attrs: bar=baz \n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n')
 
     def testGroupRemoveWithConstraints2(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops A Dummy --group AG")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A ocf:heartbeat:Dummy --group AG"
+        )
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops B Dummy --group AG")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops B ocf:heartbeat:Dummy --group AG"
+        )
         assert r == 0
 
         o,r = pcs(temp_cib, "constraint location AG prefers rh7-1")
@@ -3300,9 +3542,15 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         ac(o, " Resource: A (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (A-monitor-interval-60s)\n Resource: B (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (B-monitor-interval-60s)\n")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops A1 Dummy --group AA")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A1 ocf:heartbeat:Dummy --group AA"
+        )
         assert r == 0
-        o,r = pcs(temp_cib, "resource create --no-default-ops A2 Dummy --group AA")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A2 ocf:heartbeat:Dummy --group AA"
+        )
         assert r == 0
         o,r = pcs(temp_cib, "resource master AA")
         assert r == 0
@@ -3321,20 +3569,30 @@ Deleting Resource (and group and M/S) - A2
         assert r == 0
 
     def testMasteredGroup(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops A Dummy --group AG")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops A ocf:heartbeat:Dummy --group AG"
+        )
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops B Dummy --group AG")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops B ocf:heartbeat:Dummy --group AG"
+        )
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops C Dummy --group AG")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops C ocf:heartbeat:Dummy --group AG"
+        )
         assert r == 0
 
         o,r = pcs(temp_cib, "resource master AGMaster AG")
         assert r == 0
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops A Dummy"
+            temp_cib,
+            "resource create --no-default-ops A ocf:heartbeat:Dummy"
         )
         ac(output, """\
 Error: unable to create resource/fence device 'A', 'A' already exists on this system
@@ -3342,7 +3600,8 @@ Error: unable to create resource/fence device 'A', 'A' already exists on this sy
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops AG Dummy"
+            temp_cib,
+            "resource create --no-default-ops AG ocf:heartbeat:Dummy"
         )
         ac(output, """\
 Error: unable to create resource/fence device 'AG', 'AG' already exists on this system
@@ -3350,7 +3609,8 @@ Error: unable to create resource/fence device 'AG', 'AG' already exists on this
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops AGMaster Dummy"
+            temp_cib,
+            "resource create --no-default-ops AGMaster ocf:heartbeat:Dummy"
         )
         ac(output, """\
 Error: unable to create resource/fence device 'AGMaster', 'AGMaster' already exists on this system
@@ -3376,13 +3636,15 @@ Error: unable to create resource/fence device 'AGMaster', 'AGMaster' already exi
 
     def testClonedGroup(self):
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops D1 Dummy --group DG"
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy --group DG"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops D2 Dummy --group DG"
+            temp_cib,
+            "resource create --no-default-ops D2 ocf:heartbeat:Dummy --group DG"
         )
         ac(output, "")
         self.assertEqual(0, returnVal)
@@ -3403,7 +3665,8 @@ Error: unable to create resource/fence device 'AGMaster', 'AGMaster' already exi
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops D1 Dummy"
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy"
         )
         ac(output, """\
 Error: unable to create resource/fence device 'D1', 'D1' already exists on this system
@@ -3411,7 +3674,8 @@ Error: unable to create resource/fence device 'D1', 'D1' already exists on this
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops DG Dummy"
+            temp_cib,
+            "resource create --no-default-ops DG ocf:heartbeat:Dummy"
         )
         ac(output, """\
 Error: unable to create resource/fence device 'DG', 'DG' already exists on this system
@@ -3419,7 +3683,8 @@ Error: unable to create resource/fence device 'DG', 'DG' already exists on this
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
-            temp_cib, "resource create --no-default-ops DG-clone Dummy"
+            temp_cib,
+            "resource create --no-default-ops DG-clone ocf:heartbeat:Dummy"
         )
         ac(output, """\
 Error: unable to create resource/fence device 'DG-clone', 'DG-clone' already exists on this system
@@ -3468,7 +3733,10 @@ Error: Cannot remove more than one resource from cloned group
         self.assertEqual(0, returnVal)
 
     def testResourceEnable(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy"
+        )
         ac(o,"")
         assert r == 0
 
@@ -3506,7 +3774,10 @@ Error: Cannot remove more than one resource from cloned group
         assert r == 1
 
         # cloned group
-        output, retVal = pcs(temp_cib, "resource create dummy0 Dummy --group group0")
+        output, retVal = pcs(
+            temp_cib,
+            "resource create dummy0 ocf:heartbeat:Dummy --group group0"
+        )
         ac(output, "")
         assert retVal == 0
         output, retVal = pcs(temp_cib, "resource clone group0")
@@ -3520,11 +3791,17 @@ Error: Cannot remove more than one resource from cloned group
         assert retVal == 0
 
     def testResourceEnableUnmanaged(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy"
+        )
         ac(o,"")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D2 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D2 ocf:heartbeat:Dummy"
+        )
         ac(o,"")
         assert r == 0
 
@@ -3554,7 +3831,10 @@ Error: Cannot remove more than one resource from cloned group
         assert r == 0
 
         # resource in an unmanaged group
-        o,r = pcs(temp_cib, "resource create --no-default-ops D3 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D3 ocf:heartbeat:Dummy"
+        )
         ac(o,"")
         assert r == 0
         o,r = pcs("resource group add DG D3")
@@ -3580,7 +3860,10 @@ Error: Cannot remove more than one resource from cloned group
         assert r == 0
 
         # unmanaged resource in a group
-        o,r = pcs(temp_cib, "resource create --no-default-ops D4 Dummy")
+        o,r = pcs(
+            temp_cib,
+            "resource create --no-default-ops D4 ocf:heartbeat:Dummy"
+        )
         ac(o,"")
         assert r == 0
         o,r = pcs("resource group add DG D4")
@@ -3601,7 +3884,8 @@ Error: Cannot remove more than one resource from cloned group
 
     def testResourceEnableClone(self):
         output, retVal = pcs(
-            temp_cib, "resource create --no-default-ops dummy Dummy --clone"
+            temp_cib,
+            "resource create --no-default-ops dummy ocf:heartbeat:Dummy --clone"
         )
         ac(output, "")
         self.assertEqual(retVal, 0)
@@ -3867,23 +4151,23 @@ Error: Cannot remove more than one resource from cloned group
 """)
 
     def testOPOption(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops A Dummy op monitor interval=30s blah=blah")
+        o,r = pcs(temp_cib, "resource create --no-default-ops A ocf:heartbeat:Dummy op monitor interval=30s blah=blah")
         ac(o,"Error: blah is not a valid op option (use --force to override)\n")
         assert r == 1
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops A Dummy op monitor interval=30s op monitor interval=40s blah=blah")
+        o,r = pcs(temp_cib, "resource create --no-default-ops A ocf:heartbeat:Dummy op monitor interval=30s op monitor interval=40s blah=blah")
         ac(o,"Error: blah is not a valid op option (use --force to override)\n")
         assert r == 1
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops B Dummy")
+        o,r = pcs(temp_cib, "resource create --no-default-ops B ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource update B Dummy op monitor interval=30s blah=blah")
+        o,r = pcs(temp_cib, "resource update B ocf:heartbeat:Dummy op monitor interval=30s blah=blah")
         ac(o,"Error: blah is not a valid op option (use --force to override)\n")
         assert r == 1
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops C Dummy")
+        o,r = pcs(temp_cib, "resource create --no-default-ops C ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
 
@@ -3904,11 +4188,11 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         ac(o," Resource: B (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (B-monitor-interval-60s)\n Resource: C (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (C-monitor-interval-60s)\n")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource update B Dummy op monitor interval=30s monitor interval=31s role=master")
+        o,r = pcs(temp_cib, "resource update B op monitor interval=30s monitor interval=31s role=master")
         ac(o,"Error: role must be: Stopped, Started, Slave or Master (use --force to override)\n")
         assert r == 1
 
-        o,r = pcs(temp_cib, "resource update B Dummy op monitor interval=30s monitor interval=31s role=Master")
+        o,r = pcs(temp_cib, "resource update B op monitor interval=30s monitor interval=31s role=Master")
         ac(o,"")
         assert r == 0
 
@@ -3916,7 +4200,7 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         ac(o," Resource: B (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=30s (B-monitor-interval-30s)\n              monitor interval=31s role=Master (B-monitor-interval-31s)\n Resource: C (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (C-monitor-interval-60s)\n")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource update B dummy op interval=5s")
+        o,r = pcs(temp_cib, "resource update B op interval=5s")
         ac(o,"Error: interval=5s does not appear to be a valid operation action\n")
         assert r == 1
 
@@ -3939,11 +4223,15 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         assert r == 1
 
     def groupMSAndClone(self):
-        o,r = pcs("resource create --no-default-ops D1 Dummy --clone")
+        o,r = pcs(
+            "resource create --no-default-ops D1 ocf:heartbeat:Dummy --clone"
+        )
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource create --no-default-ops D2 Dummy --master")
+        o,r = pcs(
+            "resource create --no-default-ops D2 ocf:heartbeat:Dummy --master"
+        )
         ac(o,"")
         assert r == 0
 
@@ -3955,16 +4243,18 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         ac(o,"Error: cannot group master/slave resources\n")
         assert r == 1
 
-        o,r = pcs("resource create --no-default-ops D3 Dummy --master --group xxx --clone")
+        o,r = pcs("resource create --no-default-ops D3 ocf:heartbeat:Dummy --master --group xxx --clone")
         ac(o,"Warning: --group ignored when creating a clone\nWarning: --master ignored when creating a clone\n")
         assert r == 0
 
-        o,r = pcs("resource create --no-default-ops D4 Dummy --master --group xxx")
+        o,r = pcs("resource create --no-default-ops D4 ocf:heartbeat:Dummy --master --group xxx")
         ac(o,"Warning: --group ignored when creating a master\n")
         assert r == 0
 
     def testResourceCloneGroup(self):
-        o,r = pcs("resource create --no-default-ops dummy0 Dummy --group group")
+        o,r = pcs(
+            "resource create --no-default-ops dummy0 ocf:heartbeat:Dummy --group group"
+        )
         ac(o,"")
         assert r == 0
 
@@ -3976,37 +4266,33 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         ac(o,"Deleting Resource (and group and clone) - dummy0\n")
         assert r == 0
 
-    def testVirtualDomainResource(self):
-        dummy_o,r = pcs("resource describe VirtualDomain")
-        assert r == 0
-
     def testResourceMissingValues(self):
         o,r = pcs("resource create --no-default-ops myip IPaddr2")
-        ac(o,"Error: missing required option(s): 'ip' for resource type: ocf:heartbeat:IPaddr2 (use --force to override)\n")
+        ac(o,"Error: missing required option(s): 'ip' for resource type: ocf:heartbeat:IPaddr2 (use --force to override)\nCreating resource 'ocf:heartbeat:IPaddr2'\n")
         assert r == 1
 
         o,r = pcs("resource create --no-default-ops myip IPaddr2 --force")
-        ac(o,"")
+        ac(o,"Creating resource 'ocf:heartbeat:IPaddr2'\n")
         assert r == 0
 
         o,r = pcs("resource create --no-default-ops myip2 IPaddr2 ip=3.3.3.3")
-        ac(o,"")
+        ac(o,"Creating resource 'ocf:heartbeat:IPaddr2'\n")
         assert r == 0
 
         o,r = pcs("resource create --no-default-ops myfs Filesystem")
-        ac(o,"Error: missing required option(s): 'device, directory, fstype' for resource type: ocf:heartbeat:Filesystem (use --force to override)\n")
+        ac(o,"Error: missing required option(s): 'device, directory, fstype' for resource type: ocf:heartbeat:Filesystem (use --force to override)\nCreating resource 'ocf:heartbeat:Filesystem'\n")
         assert r == 1
 
         o,r = pcs("resource create --no-default-ops myfs Filesystem --force")
-        ac(o,"")
+        ac(o,"Creating resource 'ocf:heartbeat:Filesystem'\n")
         assert r == 0
 
         o,r = pcs("resource create --no-default-ops myfs2 Filesystem device=x directory=y --force")
-        ac(o,"")
+        ac(o,"Creating resource 'ocf:heartbeat:Filesystem'\n")
         assert r == 0
 
         o,r = pcs("resource create --no-default-ops myfs3 Filesystem device=x directory=y fstype=z")
-        ac(o,"")
+        ac(o,"Creating resource 'ocf:heartbeat:Filesystem'\n")
         assert r == 0
 
         o,r = pcs("resource --full")
@@ -4028,20 +4314,20 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         assert r == 0
 
     def testDefaultOps(self):
-        o,r = pcs("resource create X0 Dummy")
+        o,r = pcs("resource create X0 ocf:heartbeat:Dummy")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource create X1 Dummy op monitor interval=90s")
+        o,r = pcs("resource create X1 ocf:heartbeat:Dummy op monitor interval=90s")
         ac(o,"")
         assert r == 0
 
         o,r = pcs("resource create X2 IPaddr2 ip=1.1.1.1")
-        ac(o,"")
+        ac(o,"Creating resource 'ocf:heartbeat:IPaddr2'\n")
         assert r == 0
 
         o,r = pcs("resource create X3 IPaddr2 ip=1.1.1.1 op monitor interval=1s start timeout=1s stop timeout=1s")
-        ac(o,"")
+        ac(o,"Creating resource 'ocf:heartbeat:IPaddr2'\n")
         assert r == 0
 
         o,r = pcs("resource --full")
@@ -4068,13 +4354,22 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         assert r == 0
 
     def testClonedMasteredGroup(self):
-        output, retVal = pcs(temp_cib, "resource create dummy1 Dummy --no-default-ops --group dummies")
+        output, retVal = pcs(
+            temp_cib,
+            "resource create dummy1 ocf:heartbeat:Dummy --no-default-ops --group dummies"
+        )
         ac(output, "")
         assert retVal == 0
-        output, retVal = pcs(temp_cib, "resource create dummy2 Dummy --no-default-ops --group dummies")
+        output, retVal = pcs(
+            temp_cib,
+            "resource create dummy2 ocf:heartbeat:Dummy --no-default-ops --group dummies"
+        )
         ac(output, "")
         assert retVal == 0
-        output, retVal = pcs(temp_cib, "resource create dummy3 Dummy --no-default-ops --group dummies")
+        output, retVal = pcs(
+            temp_cib,
+            "resource create dummy3 ocf:heartbeat:Dummy --no-default-ops --group dummies"
+        )
         ac(output, "")
         assert retVal == 0
 
@@ -4111,13 +4406,22 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         ac(output, "NO resources configured\n")
         assert retVal == 0
 
-        output, retVal = pcs(temp_cib, "resource create dummy1 Dummy --no-default-ops --group dummies")
+        output, retVal = pcs(
+            temp_cib,
+            "resource create dummy1 ocf:heartbeat:Dummy --no-default-ops --group dummies"
+        )
         ac(output, "")
         assert retVal == 0
-        output, retVal = pcs(temp_cib, "resource create dummy2 Dummy --no-default-ops --group dummies")
+        output, retVal = pcs(
+            temp_cib,
+            "resource create dummy2 ocf:heartbeat:Dummy --no-default-ops --group dummies"
+        )
         ac(output, "")
         assert retVal == 0
-        output, retVal = pcs(temp_cib, "resource create dummy3 Dummy --no-default-ops --group dummies")
+        output, retVal = pcs(
+            temp_cib,
+            "resource create dummy3 ocf:heartbeat:Dummy --no-default-ops --group dummies"
+        )
         ac(output, "")
         assert retVal == 0
 
@@ -4156,32 +4460,37 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
 
     def test_relocate_stickiness(self):
         output, retVal = pcs(
-            temp_cib, "resource create D1 dummy --no-default-ops"
+            temp_cib, "resource create D1 ocf:pacemaker:Dummy --no-default-ops"
         )
         self.assertEqual(0, retVal)
         ac(output, "")
         output, retVal = pcs(
-            temp_cib, "resource create DG1 dummy --no-default-ops --group GR"
+            temp_cib,
+            "resource create DG1 ocf:pacemaker:Dummy --no-default-ops --group GR"
         )
         self.assertEqual(0, retVal)
         ac(output, "")
         output, retVal = pcs(
-            temp_cib, "resource create DG2 dummy --no-default-ops --group GR"
+            temp_cib,
+            "resource create DG2 ocf:pacemaker:Dummy --no-default-ops --group GR"
         )
         self.assertEqual(0, retVal)
         ac(output, "")
         output, retVal = pcs(
-            temp_cib, "resource create DC dummy --no-default-ops --clone"
+            temp_cib,
+            "resource create DC ocf:pacemaker:Dummy --no-default-ops --clone"
         )
         self.assertEqual(0, retVal)
         ac(output, "")
         output, retVal = pcs(
-            temp_cib, "resource create DGC1 dummy --no-default-ops --group GRC"
+            temp_cib,
+            "resource create DGC1 ocf:pacemaker:Dummy --no-default-ops --group GRC"
         )
         self.assertEqual(0, retVal)
         ac(output, "")
         output, retVal = pcs(
-            temp_cib, "resource create DGC2 dummy --no-default-ops --group GRC"
+            temp_cib,
+            "resource create DGC2 ocf:pacemaker:Dummy --no-default-ops --group GRC"
         )
         self.assertEqual(0, retVal)
         ac(output, "")
@@ -4190,21 +4499,21 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         ac(output, "")
 
         status = """\
- Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Resource: D1 (class=ocf provider=pacemaker type=Dummy)
   Operations: monitor interval=60s (D1-monitor-interval-60s)
  Group: GR
-  Resource: DG1 (class=ocf provider=heartbeat type=Dummy)
+  Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
    Operations: monitor interval=60s (DG1-monitor-interval-60s)
-  Resource: DG2 (class=ocf provider=heartbeat type=Dummy)
+  Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
    Operations: monitor interval=60s (DG2-monitor-interval-60s)
  Clone: DC-clone
-  Resource: DC (class=ocf provider=heartbeat type=Dummy)
+  Resource: DC (class=ocf provider=pacemaker type=Dummy)
    Operations: monitor interval=60s (DC-monitor-interval-60s)
  Clone: GRC-clone
   Group: GRC
-   Resource: DGC1 (class=ocf provider=heartbeat type=Dummy)
+   Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
     Operations: monitor interval=60s (DGC1-monitor-interval-60s)
-   Resource: DGC2 (class=ocf provider=heartbeat type=Dummy)
+   Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
     Operations: monitor interval=60s (DGC2-monitor-interval-60s)
 """
         cib_original, retVal = pcs(temp_cib, "cluster cib")
@@ -4230,30 +4539,30 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
             f.write(cib_out.toxml())
         output, retVal = pcs(temp_cib, "resource --full")
         ac(output, """\
- Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Resource: D1 (class=ocf provider=pacemaker type=Dummy)
   Meta Attrs: resource-stickiness=0 
   Operations: monitor interval=60s (D1-monitor-interval-60s)
  Group: GR
   Meta Attrs: resource-stickiness=0 
-  Resource: DG1 (class=ocf provider=heartbeat type=Dummy)
+  Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
    Meta Attrs: resource-stickiness=0 
    Operations: monitor interval=60s (DG1-monitor-interval-60s)
-  Resource: DG2 (class=ocf provider=heartbeat type=Dummy)
+  Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
    Meta Attrs: resource-stickiness=0 
    Operations: monitor interval=60s (DG2-monitor-interval-60s)
  Clone: DC-clone
   Meta Attrs: resource-stickiness=0 
-  Resource: DC (class=ocf provider=heartbeat type=Dummy)
+  Resource: DC (class=ocf provider=pacemaker type=Dummy)
    Meta Attrs: resource-stickiness=0 
    Operations: monitor interval=60s (DC-monitor-interval-60s)
  Clone: GRC-clone
   Meta Attrs: resource-stickiness=0 
   Group: GRC
    Meta Attrs: resource-stickiness=0 
-   Resource: DGC1 (class=ocf provider=heartbeat type=Dummy)
+   Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
     Meta Attrs: resource-stickiness=0 
     Operations: monitor interval=60s (DGC1-monitor-interval-60s)
-   Resource: DGC2 (class=ocf provider=heartbeat type=Dummy)
+   Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
     Meta Attrs: resource-stickiness=0 
     Operations: monitor interval=60s (DGC2-monitor-interval-60s)
 """)
@@ -4278,25 +4587,25 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
             f.write(cib_out.toxml())
         output, retVal = pcs(temp_cib, "resource --full")
         ac(output, """\
- Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Resource: D1 (class=ocf provider=pacemaker type=Dummy)
   Meta Attrs: resource-stickiness=0 
   Operations: monitor interval=60s (D1-monitor-interval-60s)
  Group: GR
-  Resource: DG1 (class=ocf provider=heartbeat type=Dummy)
+  Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
    Meta Attrs: resource-stickiness=0 
    Operations: monitor interval=60s (DG1-monitor-interval-60s)
-  Resource: DG2 (class=ocf provider=heartbeat type=Dummy)
+  Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
    Operations: monitor interval=60s (DG2-monitor-interval-60s)
  Clone: DC-clone
-  Resource: DC (class=ocf provider=heartbeat type=Dummy)
+  Resource: DC (class=ocf provider=pacemaker type=Dummy)
    Meta Attrs: resource-stickiness=0 
    Operations: monitor interval=60s (DC-monitor-interval-60s)
  Clone: GRC-clone
   Group: GRC
-   Resource: DGC1 (class=ocf provider=heartbeat type=Dummy)
+   Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
     Meta Attrs: resource-stickiness=0 
     Operations: monitor interval=60s (DGC1-monitor-interval-60s)
-   Resource: DGC2 (class=ocf provider=heartbeat type=Dummy)
+   Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
     Operations: monitor interval=60s (DGC2-monitor-interval-60s)
 """)
         self.assertEqual(0, retVal)
@@ -4320,24 +4629,24 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
             f.write(cib_out.toxml())
         output, retVal = pcs(temp_cib, "resource --full")
         ac(output, """\
- Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Resource: D1 (class=ocf provider=pacemaker type=Dummy)
   Operations: monitor interval=60s (D1-monitor-interval-60s)
  Group: GR
-  Resource: DG1 (class=ocf provider=heartbeat type=Dummy)
+  Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
    Operations: monitor interval=60s (DG1-monitor-interval-60s)
-  Resource: DG2 (class=ocf provider=heartbeat type=Dummy)
+  Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
    Operations: monitor interval=60s (DG2-monitor-interval-60s)
  Clone: DC-clone
-  Resource: DC (class=ocf provider=heartbeat type=Dummy)
+  Resource: DC (class=ocf provider=pacemaker type=Dummy)
    Operations: monitor interval=60s (DC-monitor-interval-60s)
  Clone: GRC-clone
   Meta Attrs: resource-stickiness=0 
   Group: GRC
    Meta Attrs: resource-stickiness=0 
-   Resource: DGC1 (class=ocf provider=heartbeat type=Dummy)
+   Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
     Meta Attrs: resource-stickiness=0 
     Operations: monitor interval=60s (DGC1-monitor-interval-60s)
-   Resource: DGC2 (class=ocf provider=heartbeat type=Dummy)
+   Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
     Meta Attrs: resource-stickiness=0 
     Operations: monitor interval=60s (DGC2-monitor-interval-60s)
 """)
@@ -4362,26 +4671,26 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
             f.write(cib_out.toxml())
         output, retVal = pcs(temp_cib, "resource --full")
         ac(output, """\
- Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+ Resource: D1 (class=ocf provider=pacemaker type=Dummy)
   Operations: monitor interval=60s (D1-monitor-interval-60s)
  Group: GR
   Meta Attrs: resource-stickiness=0 
-  Resource: DG1 (class=ocf provider=heartbeat type=Dummy)
+  Resource: DG1 (class=ocf provider=pacemaker type=Dummy)
    Meta Attrs: resource-stickiness=0 
    Operations: monitor interval=60s (DG1-monitor-interval-60s)
-  Resource: DG2 (class=ocf provider=heartbeat type=Dummy)
+  Resource: DG2 (class=ocf provider=pacemaker type=Dummy)
    Meta Attrs: resource-stickiness=0 
    Operations: monitor interval=60s (DG2-monitor-interval-60s)
  Clone: DC-clone
   Meta Attrs: resource-stickiness=0 
-  Resource: DC (class=ocf provider=heartbeat type=Dummy)
+  Resource: DC (class=ocf provider=pacemaker type=Dummy)
    Meta Attrs: resource-stickiness=0 
    Operations: monitor interval=60s (DC-monitor-interval-60s)
  Clone: GRC-clone
   Group: GRC
-   Resource: DGC1 (class=ocf provider=heartbeat type=Dummy)
+   Resource: DGC1 (class=ocf provider=pacemaker type=Dummy)
     Operations: monitor interval=60s (DGC1-monitor-interval-60s)
-   Resource: DGC2 (class=ocf provider=heartbeat type=Dummy)
+   Resource: DGC2 (class=ocf provider=pacemaker type=Dummy)
     Operations: monitor interval=60s (DGC2-monitor-interval-60s)
 """)
         self.assertEqual(0, retVal)
@@ -4493,15 +4802,15 @@ class ResourcesReferencedFromAclTest(unittest.TestCase, AssertPcsMixin):
         self.pcs_runner = PcsRunner(temp_cib)
 
     def test_remove_referenced_primitive_resource(self):
-        self.assert_pcs_success('resource create dummy Dummy')
+        self.assert_pcs_success('resource create dummy ocf:heartbeat:Dummy')
         self.assert_pcs_success('acl role create read-dummy read id dummy')
         self.assert_pcs_success('resource delete dummy', [
             'Deleting Resource - dummy'
         ])
 
     def test_remove_group_with_referenced_primitive_resource(self):
-        self.assert_pcs_success('resource create dummy1 Dummy')
-        self.assert_pcs_success('resource create dummy2 Dummy')
+        self.assert_pcs_success('resource create dummy1 ocf:heartbeat:Dummy')
+        self.assert_pcs_success('resource create dummy2 ocf:heartbeat:Dummy')
         self.assert_pcs_success('resource group add dummy-group dummy1 dummy2')
         self.assert_pcs_success('acl role create read-dummy read id dummy2')
         self.assert_pcs_success('resource delete dummy-group', [
@@ -4512,8 +4821,8 @@ class ResourcesReferencedFromAclTest(unittest.TestCase, AssertPcsMixin):
         ])
 
     def test_remove_referenced_group(self):
-        self.assert_pcs_success('resource create dummy1 Dummy')
-        self.assert_pcs_success('resource create dummy2 Dummy')
+        self.assert_pcs_success('resource create dummy1 ocf:heartbeat:Dummy')
+        self.assert_pcs_success('resource create dummy2 ocf:heartbeat:Dummy')
         self.assert_pcs_success('resource group add dummy-group dummy1 dummy2')
         self.assert_pcs_success('acl role create acl-role-a read id dummy-group')
         self.assert_pcs_success('resource delete dummy-group', [
@@ -4529,7 +4838,9 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
         self.pcs_runner = PcsRunner(temp_cib)
 
     def test_no_op_allowed_in_clone_update(self):
-        self.assert_pcs_success("resource create dummy Dummy --clone")
+        self.assert_pcs_success(
+            "resource create dummy ocf:heartbeat:Dummy --clone"
+        )
         self.assert_pcs_success(
             "resource show dummy-clone",
             """\
@@ -4560,7 +4871,9 @@ class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
         )
 
     def test_no_op_allowed_in_master_update(self):
-        self.assert_pcs_success("resource create dummy Dummy --master")
+        self.assert_pcs_success(
+            "resource create dummy ocf:heartbeat:Dummy --master"
+        )
         self.assert_pcs_success(
             "resource show dummy-master",
             """\
@@ -4596,7 +4909,7 @@ class ResourceRemoveWithTicketTest(unittest.TestCase, AssertPcsMixin):
         self.pcs_runner = PcsRunner(temp_cib)
 
     def test_remove_ticket(self):
-        self.assert_pcs_success('resource create A Dummy')
+        self.assert_pcs_success('resource create A ocf:heartbeat:Dummy')
         self.assert_pcs_success(
             'constraint ticket add T master A loss-policy=fence'
         )
diff --git a/pcs/test/test_rule.py b/pcs/test/test_rule.py
index ad3448d..0ea3a8f 100644
--- a/pcs/test/test_rule.py
+++ b/pcs/test/test_rule.py
@@ -1665,7 +1665,10 @@ class DomRuleAddTest(unittest.TestCase):
 
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
-        output, returnVal = pcs(temp_cib, "resource create dummy1 Dummy")
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create dummy1 ocf:heartbeat:Dummy"
+        )
         assert returnVal == 0 and output == ""
 
     def test_success_xml(self):
diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
index 82b2c84..5cac717 100644
--- a/pcs/test/test_stonith.py
+++ b/pcs/test/test_stonith.py
@@ -6,30 +6,75 @@ from __future__ import (
 )
 
 import shutil
-from pcs.test.tools import pcs_unittest as unittest
 
+from pcs.test.tools.assertions import AssertPcsMixin
 from pcs.test.tools.misc import (
     ac,
     get_test_resource as rc,
 )
-from pcs.test.tools.pcs_runner import pcs
+from pcs.test.tools.pcs_runner import pcs, PcsRunner
+from pcs.test.tools import pcs_unittest as unittest
 
 from pcs import utils
 
 empty_cib = rc("cib-empty.xml")
 temp_cib = rc("temp-cib.xml")
 
+
+class StonithDescribeTest(unittest.TestCase, AssertPcsMixin):
+    def setUp(self):
+        self.pcs_runner = PcsRunner(temp_cib)
+
+
+    def test_success(self):
+        self.assert_pcs_success(
+            "stonith describe fence_apc",
+            stdout_start="""\
+fence_apc - Fence agent for APC over telnet/ssh
+
+fence_apc is an I/O Fencing agent which can be used with the APC network power switch. It logs into device via telnet/ssh  and reboots a specified outlet. Lengthy telnet/ssh connections should be avoided while a GFS cluster  is  running  because  the  connection will block any necessary fencing actions.
+
+Stonith options:
+"""
+        )
+
+
+    def test_nonextisting_agent(self):
+        self.assert_pcs_fail(
+            "stonith describe fence_noexist",
+            (
+                "Error: Agent 'fence_noexist' is not installed or does not"
+                " provide valid metadata: Metadata query for"
+                " stonith:fence_noexist failed: -5\n"
+            )
+        )
+
+
+    def test_not_enough_params(self):
+        self.assert_pcs_fail(
+            "stonith describe",
+            stdout_start="\nUsage: pcs stonith describe...\n"
+        )
+
+
+    def test_too_many_params(self):
+        self.assert_pcs_fail(
+            "stonith describe agent1 agent2",
+            stdout_start="\nUsage: pcs stonith describe...\n"
+        )
+
+
 class StonithTest(unittest.TestCase):
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
 
     def testStonithCreation(self):
         output, returnVal = pcs(temp_cib, "stonith create test1 fence_noxist")
+        ac(output, "Error: Agent 'fence_noxist' is not installed or does not provide valid metadata: Metadata query for stonith:fence_noxist failed: -5, use --force to override\n")
         assert returnVal == 1
-        assert output == "Error: Agent 'fence_noxist' not found, use --force to override\n"
 
         output, returnVal = pcs(temp_cib, "stonith create test1 fence_noxist --force")
-        ac(output, "Warning: Agent 'fence_noxist' not found\n")
+        ac(output, "Warning: Agent 'fence_noxist' is not installed or does not provide valid metadata: Metadata query for stonith:fence_noxist failed: -5\n")
         self.assertEqual(returnVal, 0)
 
         output, returnVal = pcs(temp_cib, "stonith create test2 fence_apc")
diff --git a/pcs/test/tools/assertions.py b/pcs/test/tools/assertions.py
index 1151809..4c8f8df 100644
--- a/pcs/test/tools/assertions.py
+++ b/pcs/test/tools/assertions.py
@@ -115,7 +115,7 @@ class ExtendedAssertionsMixin(object):
                         "Property {property} doesn't exist in exception"
                         " {exception}".format(
                             property=prop,
-                            exception=expected_exception.__class__.__name__
+                            exception=e.__class__.__name__
                         )
                     )
 
diff --git a/pcs/test/tools/color_text_runner.py b/pcs/test/tools/color_text_runner.py
deleted file mode 100644
index b8383f6..0000000
--- a/pcs/test/tools/color_text_runner.py
+++ /dev/null
@@ -1,117 +0,0 @@
-from __future__ import (
-    absolute_import,
-    division,
-    print_function,
-    unicode_literals,
-)
-
-from pcs.test.tools import pcs_unittest as unittest
-
-
-palete = {
-    "black": '\033[30m',
-    "red": '\033[31m',
-    "green": '\033[32m',
-    "orange": '\033[33m',
-    "blue": '\033[34m',
-    "purple": '\033[35m',
-    "cyan": '\033[36m',
-    "lightgrey": '\033[37m',
-    "darkgrey": '\033[90m',
-    "lightred": '\033[91m',
-    "lightgreen": '\033[92m',
-    "yellow": '\033[93m',
-    "lightblue": '\033[94m',
-    "pink": '\033[95m',
-    "lightcyan": '\033[96m',
-    "end" : '\033[0m',
-    "bold" : '\033[1m',
-    "underline" : '\033[4m',
-}
-
-def apply(key_list, text):
-    return("".join([palete[key] for key in key_list]) + text + palete["end"])
-
-TextTestResult = unittest.TextTestResult
-#pylint: disable=bad-super-call
-class ColorTextTestResult(TextTestResult):
-    def addSuccess(self, test):
-        super(TextTestResult, self).addSuccess(test)
-        if self.showAll:
-            self.stream.writeln(apply(["green", "bold"], "OK"))
-        elif self.dots:
-            self.stream.write(apply(["green", "bold"], "."))
-            self.stream.flush()
-
-    def addError(self, test, err):
-        super(TextTestResult, self).addError(test, err)
-        if self.showAll:
-            self.stream.writeln(apply(["red", "bold"], "ERROR"))
-        elif self.dots:
-            self.stream.write(apply(["red", "bold"], 'E'))
-            self.stream.flush()
-
-    def addFailure(self, test, err):
-        super(TextTestResult, self).addFailure(test, err)
-        if self.showAll:
-            self.stream.writeln(apply(["lightred", "bold"], "FAIL"))
-        elif self.dots:
-            self.stream.write(apply(["lightred", "bold"], 'F'))
-            self.stream.flush()
-
-    def addSkip(self, test, reason):
-        super(TextTestResult, self).addSkip(test, reason)
-        if self.showAll:
-            self.stream.writeln(
-                apply(["blue", "bold"], "skipped {0!r}".format(reason))
-            )
-        elif self.dots:
-            self.stream.write(apply(["blue", "bold"], 's'))
-            self.stream.flush()
-
-    def getDescription(self, test):
-        doc_first_line = test.shortDescription()
-        if self.descriptions and doc_first_line:
-            return '\n'.join((str(test), doc_first_line))
-        else:
-            module_parts = test.__class__.__module__.split(".")
-            module = module_parts[-1]
-            package = ".".join(module_parts[:-1])+"." if module_parts else ""
-
-            return (
-                test._testMethodName
-                +" "
-                +apply(["lightgrey"], "(")
-                +apply(["lightgrey"], package)
-                +apply(["bold"], module)
-                +"."
-                +test.__class__.__name__
-                +apply(["lightgrey"], ")")
-            )
-
-    def __format_test_name(self, test):
-        return (
-            test.__class__.__module__
-            + "." + test.__class__.__name__
-            + "." + test._testMethodName
-        )
-
-    def printErrors(self):
-        super(ColorTextTestResult, self).printErrors()
-        if not self.errors and not self.failures:
-            return
-
-        self.stream.writeln()
-        self.stream.writeln(self.separator1)
-        self.stream.writeln()
-        self.stream.writeln(
-            "for running failed tests only (errors are first then failures):"
-        )
-        self.stream.writeln()
-        self.stream.write(" \\\n".join(
-            [
-                self.__format_test_name(test)
-                for test, _ in self.errors + self.failures
-            ]
-        ))
-        self.stream.writeln()
diff --git a/pcs/test/tools/color_text_runner/__init__.py b/pcs/test/tools/color_text_runner/__init__.py
new file mode 100644
index 0000000..a82d63e
--- /dev/null
+++ b/pcs/test/tools/color_text_runner/__init__.py
@@ -0,0 +1,8 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.color_text_runner.result import get_text_test_result_class
diff --git a/pcs/test/tools/color_text_runner/format.py b/pcs/test/tools/color_text_runner/format.py
new file mode 100644
index 0000000..b54ad8c
--- /dev/null
+++ b/pcs/test/tools/color_text_runner/format.py
@@ -0,0 +1,157 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import re
+from functools import partial
+
+
+palete = {
+    "black": '\033[30m',
+    "red": '\033[31m',
+    "green": '\033[32m',
+    "orange": '\033[33m',
+    "blue": '\033[34m',
+    "purple": '\033[35m',
+    "cyan": '\033[36m',
+    "lightgrey": '\033[37m',
+    "darkgrey": '\033[90m',
+    "lightred": '\033[91m',
+    "lightgreen": '\033[92m',
+    "yellow": '\033[93m',
+    "lightblue": '\033[94m',
+    "pink": '\033[95m',
+    "lightcyan": '\033[96m',
+    "end" : '\033[0m',
+    "bold" : '\033[1m',
+    "underline" : '\033[4m',
+}
+
+separator1 = '=' * 70
+separator2 = '-' * 70
+
+#apply is builtin but is deprecated since 2.3 => no problem to redefine it here
+def apply(key_list, text):
+    return("".join([palete[key] for key in key_list]) + text + palete["end"])
+
+lightgrey = partial(apply, ["lightgrey"])
+bold = partial(apply, ["bold"])
+blue = partial(apply, ["blue", "bold"])
+red = partial(apply, ["red", "bold"])
+green = partial(apply, ["green", "bold"])
+
+def format_module_name(name):
+    prefix = ""
+    part_list = name.split("_")
+
+    if part_list[0].startswith("test"):
+        prefix = "test"
+        part_list[0] = part_list[0][len("test"):]
+
+    return prefix + "_".join([bold(part) for part in part_list])
+
+def format_module(test):
+    parts = test.__class__.__module__.split(".")
+    return lightgrey(".").join(parts[:-1] + [format_module_name(parts[-1])])
+
+def format_test_method_name(test):
+    parts = test._testMethodName.split("_")
+
+    if parts[0].startswith("test"):
+        parts[0] = lightgrey("test") + parts[0][len("test"):]
+
+    return lightgrey("_").join(parts)
+
+def format_error_overview(errors, failures, slash_last):
+    return [
+        red("for running failed tests only (errors are first then failures):"),
+        "",
+    ] + [
+        lightgrey(err) for err in slash_errors(
+            [format_test_name(test) for test, _ in errors + failures],
+            slash_last
+        )
+    ] + [""]
+
+def slash_errors(error_list, slash_last=True):
+    if not slash_last:
+        return slash_errors(error_list[:-1]) + [error_list[-1]]
+    return ["{0} \\".format(err) for err in error_list]
+
+def format_test_name(test):
+    return (
+        format_module(test)
+        + "." + test.__class__.__name__
+        + "." + format_test_method_name(test)
+    )
+
+def get_description(test, descriptions):
+    doc_first_line = test.shortDescription()
+    if descriptions and doc_first_line:
+        return '\n'.join((str(test), doc_first_line))
+    else:
+        module_parts = test.__class__.__module__.split(".")
+        module = module_parts[-1]
+        package = ".".join(module_parts[:-1])+"." if module_parts else ""
+
+        return (
+            test._testMethodName
+            +" "
+            +lightgrey("(")
+            +lightgrey(package)
+            +bold(module)
+            +"."
+            +test.__class__.__name__
+            +lightgrey(")")
+        )
+
+def format_error_list(flavour, errors, descriptions, traceback_highlight):
+    line_list = []
+    for test, err in errors:
+        line_list.extend([
+            lightgrey(separator1),
+            "%s: %s" % (red(flavour), get_description(test, descriptions)),
+            lightgrey(separator2),
+            "%s" % format_traceback(err) if traceback_highlight else err,
+            "",
+        ])
+    return line_list
+
+def format_traceback(err):
+    formated_err = []
+    path_regex = re.compile(
+        '^  File "(?P<path>[^"]+)", line (?P<line>\d+), in (?P<name>.*)$'
+    )
+    was_prev_path = False
+    for line in err.splitlines():
+        if line == "Traceback (most recent call last):":
+            formated_err.append(lightgrey(line))
+            was_prev_path = False
+            continue
+
+        match = path_regex.match(line)
+        if match:
+            path = match.group("path").split("/")
+            formated_err.append(
+                lightgrey('  File "')
+                + lightgrey("/").join(path[:-1] + [bold(path[-1])])
+                + lightgrey('", line ') + bold(match.group("line"))
+                + lightgrey(', in ') + bold(match.group("name"))
+            )
+            was_prev_path = True
+        elif was_prev_path:
+            formated_err.append(bold(line))
+            was_prev_path = False
+        else:
+            formated_err.append(line)
+            was_prev_path = False
+    return "\n".join(formated_err)
+
+def format_skips(skip_map):
+    return [blue("Some tests have been skipped:")] + [
+        lightgrey("{0} ({1}x)".format(reason, len(test_list)))
+        for reason, test_list in skip_map.items()
+    ] + [""]
diff --git a/pcs/test/tools/color_text_runner/result.py b/pcs/test/tools/color_text_runner/result.py
new file mode 100644
index 0000000..600b7a3
--- /dev/null
+++ b/pcs/test/tools/color_text_runner/result.py
@@ -0,0 +1,120 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+from pcs.test.tools import pcs_unittest as unittest
+from pcs.test.tools.color_text_runner.format import (
+    separator1,
+    format_error_list,
+    format_error_overview,
+    format_skips,
+)
+from pcs.test.tools.color_text_runner.writer import (
+    DotWriter,
+    StandardVerboseWriter,
+    ImprovedVerboseWriter,
+    Writer,
+)
+
+
+def get_text_test_result_class(
+    slash_last_fail_in_overview=False,
+    traditional_verbose=False,
+    traceback_highlight=False,
+):
+    #TextTestResult is needed here. Direct inheriting from TestResult does not
+    #work in python 2.6
+    TextTestResult = unittest.TextTestResult
+    # pylint: disable=bad-super-call
+    class ColorTextTestResult(TextTestResult):
+        def __init__(self, stream, descriptions, verbosity):
+            super(ColorTextTestResult, self).__init__(
+                stream,
+                descriptions,
+                verbosity
+            )
+            self.verbosity = 2 if traditional_verbose else verbosity
+
+            self.reportWriter = self.__chooseWriter()(
+                self.stream,
+                self.descriptions,
+            )
+            self.skip_map = {}
+
+        def startTest(self, test):
+            super(TextTestResult, self).startTest(test)
+            self.reportWriter.startTest(test)
+
+        def addSuccess(self, test):
+            super(TextTestResult, self).addSuccess(test)
+            self.reportWriter.addSuccess(test)
+
+        def addError(self, test, err):
+            super(TextTestResult, self).addError(test, err)
+            self.reportWriter.addError(test, err)
+
+        def addFailure(self, test, err):
+            super(TextTestResult, self).addFailure(test, err)
+            self.reportWriter.addFailure(test, err)
+
+        def addSkip(self, test, reason):
+            super(TextTestResult, self).addSkip(test, reason)
+            self.skip_map.setdefault(reason, []).append(test)
+            self.reportWriter.addSkip(test, reason)
+
+        def addExpectedFailure(self, test, err):
+            super(TextTestResult, self).addExpectedFailure(test, err)
+            self.reportWriter.addExpectedFailure(test, err)
+
+        def addUnexpectedSuccess(self, test):
+            super(TextTestResult, self).addUnexpectedSuccess(test)
+            self.reportWriter.addUnexpectedSuccess(test)
+
+        def printErrors(self):
+            line_list = (
+                format_error_list(
+                    'ERROR',
+                    self.errors,
+                    self.descriptions,
+                    traceback_highlight,
+                )
+                +
+                format_error_list(
+                    'FAIL',
+                    self.failures,
+                    self.descriptions,
+                    traceback_highlight,
+                )
+            )
+
+            if (self.errors + self.failures) or self.skip_map:
+                line_list.extend([separator1, ""])
+
+            if self.errors + self.failures:
+                line_list.extend([""] + format_error_overview(
+                    self.errors,
+                    self.failures,
+                    slash_last_fail_in_overview
+                ))
+
+            if self.skip_map:
+                line_list.extend([""] + format_skips(self.skip_map))
+
+            if self.verbosity:
+                line_list.insert(0, "")
+
+            for line in line_list:
+                self.stream.writeln(line)
+
+        def __chooseWriter(self):
+            if traditional_verbose:
+                return StandardVerboseWriter
+            if self.verbosity > 1:
+                return ImprovedVerboseWriter
+            if self.verbosity > 0:
+                return DotWriter
+            return Writer
+
+    return ColorTextTestResult
diff --git a/pcs/test/tools/color_text_runner/writer.py b/pcs/test/tools/color_text_runner/writer.py
new file mode 100644
index 0000000..80e7374
--- /dev/null
+++ b/pcs/test/tools/color_text_runner/writer.py
@@ -0,0 +1,132 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.color_text_runner.format import (
+    blue,
+    red,
+    green,
+    lightgrey,
+    get_description,
+    format_module,
+    format_test_method_name,
+)
+
+
+class Writer(object):
+    def __init__(self, stream, descriptions):
+        self.stream = stream
+        self.descriptions = descriptions
+
+    def addSuccess(self, test):
+        pass
+
+    def addError(self, test, err):
+        pass
+
+    def addFailure(self, test, err):
+        pass
+
+    def addSkip(self, test, reason):
+        pass
+
+    def startTest(self, test):
+        pass
+
+    def addExpectedFailure(self, test, err):
+        pass
+
+    def addUnexpectedSuccess(self, test):
+        pass
+
+class DotWriter(Writer):
+    def addSuccess(self, test):
+        self.stream.write(green("."))
+        self.stream.flush()
+
+    def addError(self, test, err):
+        self.stream.write(red('E'))
+        self.stream.flush()
+
+    def addFailure(self, test, err):
+        self.stream.write(red('F'))
+        self.stream.flush()
+
+    def addSkip(self, test, reason):
+        self.stream.write(blue('s'))
+        self.stream.flush()
+
+    def addExpectedFailure(self, test, err):
+        self.stream.write(blue('x'))
+        self.stream.flush()
+
+    def addUnexpectedSuccess(self, test):
+        self.stream.write(red('u'))
+        self.stream.flush()
+
+class StandardVerboseWriter(Writer):
+    def addSuccess(self, test):
+        self.stream.writeln(green("OK"))
+
+    def addError(self, test, err):
+        self.stream.writeln(red("ERROR"))
+
+    def addFailure(self, test, err):
+        self.stream.writeln(red("FAIL"))
+
+    def addSkip(self, test, reason):
+        self.stream.writeln(
+            blue("skipped {0!r}".format(reason))
+        )
+
+    def startTest(self, test):
+        self.stream.write(get_description(test, self.descriptions))
+        self.stream.write(" ... ")
+        self.stream.flush()
+
+    def addExpectedFailure(self, test, err):
+        self.stream.writeln(blue("expected failure"))
+
+    def addUnexpectedSuccess(self, test):
+        self.stream.writeln(red("unexpected success"))
+
+class ImprovedVerboseWriter(StandardVerboseWriter):
+    def __init__(self, stream, descriptions):
+        super(ImprovedVerboseWriter, self).__init__(stream, descriptions)
+        self.last_test = None
+
+    def __is_new_module(self, test):
+        return (
+            not self.last_test
+            or
+            test.__class__.__module__ != self.last_test.__class__.__module__
+        )
+
+    def __is_new_class(self, test):
+        return (
+            self.__is_new_module(test)
+            or
+            test.__class__.__name__ != self.last_test.__class__.__name__
+        )
+
+    def __format_module(self, test):
+        if not self.__is_new_module(test):
+            return lightgrey(test.__class__.__module__)
+        return format_module(test)
+
+    def __format_class(self, test):
+        if not self.__is_new_class(test):
+            return lightgrey(test.__class__.__name__)
+        return test.__class__.__name__
+
+    def startTest(self, test):
+        self.stream.write(
+            self.__format_module(test) + lightgrey(".")
+            + self.__format_class(test) + lightgrey(".")
+            + format_test_method_name(test) + lightgrey(" : ")
+        )
+        self.stream.flush()
+        self.last_test = test
diff --git a/pcs/test/tools/misc.py b/pcs/test/tools/misc.py
index 745b228..4696497 100644
--- a/pcs/test/tools/misc.py
+++ b/pcs/test/tools/misc.py
@@ -64,3 +64,11 @@ def create_patcher(target_prefix):
             "{0}.{1}".format(target_prefix, target), *args, **kwargs
         )
     return patch
+
+def outdent(text):
+    line_list = text.splitlines()
+    smallest_indentation = min([
+        len(line) - len(line.lstrip(" "))
+        for line in line_list if line
+    ])
+    return "\n".join([line[smallest_indentation:] for line in line_list])
diff --git a/pcs/test/tools/pcs_runner.py b/pcs/test/tools/pcs_runner.py
index 5f43cdc..584a1d9 100644
--- a/pcs/test/tools/pcs_runner.py
+++ b/pcs/test/tools/pcs_runner.py
@@ -28,7 +28,7 @@ class PcsRunner(object):
             else corosync_conf_file
         )
         self.cluster_conf_file = (
-            rc("corosync.conf") if cluster_conf_file is None
+            rc("cluster.conf") if cluster_conf_file is None
             else cluster_conf_file
         )
 
diff --git a/pcs/test/tools/pcs_unittest.py b/pcs/test/tools/pcs_unittest.py
index 7b7b37a..20f4f0c 100644
--- a/pcs/test/tools/pcs_unittest.py
+++ b/pcs/test/tools/pcs_unittest.py
@@ -145,6 +145,7 @@ def ensure_raise_from_iterable_side_effect():
     exception is simply returned (in older version of mock).
     """
     def create_new_call(old_call, inPy3k):
+        # pylint: disable=old-style-class
         class OldStyleClass:
             pass
         ClassTypes = (type,) if inPy3k else (type, type(OldStyleClass))
diff --git a/pcs/test/tools/test/__init__.py b/pcs/test/tools/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/test/tools/test/test_misc.py b/pcs/test/tools/test/test_misc.py
new file mode 100644
index 0000000..0d0f319
--- /dev/null
+++ b/pcs/test/tools/test/test_misc.py
@@ -0,0 +1,45 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.misc import outdent
+
+class OutdentTest(TestCase):
+    def test_returns_the_same_text_when_not_indented(self):
+        text = "\n".join([
+            "first line",
+            "  second line",
+            "    third line",
+        ])
+        self.assertEqual(text, outdent(text))
+
+    def test_remove_the_smallest_indentation(self):
+        self.assertEqual(
+            "\n".join([
+                "  first line",
+                "second line",
+                "  third line",
+            ]),
+            outdent("\n".join([
+                "    first line",
+                "  second line",
+                "    third line",
+            ]))
+        )
+
+    def test_very_ugly_indented_text(self):
+        self.assertEqual(
+            """\
+Cluster Name: test99
+  Options:
+""",
+            outdent("""\
+                Cluster Name: test99
+                  Options:
+                """
+            )
+        )
diff --git a/pcs/usage.py b/pcs/usage.py
index ea407c3..35fc1be 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -197,15 +197,15 @@ Commands:
         specified, only show groups (and their resources).  If --hide-inactive
         is specified, only show active resources.
 
-    list [<standard|provider|type>] [--nodesc]
-        Show list of all available resources, optionally filtered by specified
-        type, standard or provider.  If --nodesc is used then descriptions
-        of resources are not printed.
+    list [filter] [--nodesc]
+        Show list of all available resource agents (if filter is provided then
+        only resource agents matching the filter will be shown). If --nodesc is
+        used then descriptions of resource agents are not printed.
 
-    describe <standard:provider:type|type>
+    describe [<standard>:[<provider>:]]<type>
         Show options for the specified resource.
 
-    create <resource id> <standard:provider:type|type> [resource options]
+    create <resource id> [<standard>:[<provider>:]]<type> [resource options]
            [op <operation action> <operation options> [<operation action>
            <operation options>]...] [meta <meta options>...]
            [--clone <clone options> | --master <master options> |
@@ -820,6 +820,10 @@ Commands:
         To prevent accidental running of this command, --force or interactive
         user response is required in order to proceed.
 
+        NOTE: It is not checked if the specified node exists in the cluster
+        in order to be able to work with nodes not visible from the local
+        cluster partition.
+
     sbd enable [--watchdog=<path>[@<node>]] ... [<SBD_OPTION>=<value>] ...
         Enable SBD in cluster. Default path for watchdog device is
         /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5),
@@ -1072,21 +1076,26 @@ Commands:
         Delete the role specified and remove it from any users/groups it was
         assigned to.
 
-    role assign <role id> [to] <username/group>
+    role assign <role id> [to] [user|group] <username/group>
         Assign a role to a user or group already created with 'pcs acl
-        user/group create'.
+        user/group create'. If there is user and group with the same id and it
+        is not specified which should be used, user will be prioritized. In
+        cases like this specify whether user or group should be used.
 
-    role unassign <role id> [from] <username/group>
-        Remove a role from the specified user.
+    role unassign <role id> [from] [user|group] <username/group>
+        Remove a role from the specified user. If there is user and group with
+        the same id and it is not specified which should be used, user will be
+        prioritized. In cases like this specify whether user or group should
+        be used.
 
-    user create <username> <role id> [<role id>]...
+    user create <username> [<role id>]...
         Create an ACL for the user specified and assign roles to the user.
 
     user delete <username>
         Remove the user specified (and roles assigned will be unassigned for
         the specified user).
 
-    group create <group> <role id> [<role id>]...
+    group create <group> [<role id>]...
         Create an ACL for the group specified and assign roles to the group.
 
     group delete <group>
@@ -1515,8 +1524,8 @@ Commands:
             [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
         Update existing alert handler with specified id.
 
-    remove <alert-id>
-        Remove alert handler with specified id.
+    remove <alert-id> ...
+        Remove alert handlers with specified ids.
 
     recipient add <alert-id> value=<recipient-value> [id=<recipient-id>]
             [description=<description>] [options [<option>=<value>]...]
@@ -1528,8 +1537,8 @@ Commands:
             [meta [<meta-option>=<value>]...]
         Update existing recipient identified by it's id.
 
-    recipient remove <recipient-id>
-        Remove specified recipient.
+    recipient remove <recipient-id> ...
+        Remove specified recipients.
 """
     if pout:
         print(sub_usage(args, output))
diff --git a/pcs/utils.py b/pcs/utils.py
index 1e99bc9..50f00bc 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -31,20 +31,25 @@ from pcs.cli.common.reports import (
     process_library_reports,
     LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
 )
-from pcs.common.tools import simple_cache
+from pcs.common.tools import (
+    join_multilines,
+    simple_cache,
+)
 from pcs.lib import reports, sbd
 from pcs.lib.env import LibraryEnvironment
 from pcs.lib.errors import LibraryError
 from pcs.lib.external import (
     CommandRunner,
-    is_cman_cluster,
-    is_systemctl,
-    is_service_enabled,
-    is_service_running,
     disable_service,
     DisableServiceError,
     enable_service,
     EnableServiceError,
+    is_cman_cluster as lib_is_cman_cluster,
+    is_service_enabled,
+    is_service_running,
+    is_systemctl,
+    _service,
+    _systemctl,
 )
 import pcs.lib.resource_agent as lib_ra
 import pcs.lib.corosync.config_parser as corosync_conf_parser
@@ -60,6 +65,7 @@ from pcs.lib.pacemaker_values import(
 from pcs.cli.common import middleware
 from pcs.cli.common.env import Env
 from pcs.cli.common.lib_wrapper import Library
+from pcs.cli.common.reports import build_report_message
 from pcs.cli.booth.command import DEFAULT_BOOTH_NAME
 import pcs.cli.booth.env
 
@@ -104,7 +110,6 @@ DEFAULT_RESOURCE_ACTIONS = ["monitor", "start", "stop", "promote", "demote"]
 usefile = False
 filename = ""
 pcs_options = {}
-fence_bin = settings.fence_agent_binaries
 
 
 class UnknownPropertyException(Exception):
@@ -668,7 +673,7 @@ def addNodeToClusterConf(node):
     ])
     if retval == 0:
         for line in output.splitlines():
-            fence_name, fence_args = line.split(":", 1)
+            fence_name, dummy_fence_args = line.split(":", 1)
             all_fence_names.add(fence_name)
             match = re.match("(^|(.* ))agent=fence_pcmk((,.+)|$)", line)
             if match:
@@ -871,6 +876,7 @@ def run(
         env_extend = dict()
     env_var = env_extend
     env_var.update(dict(os.environ))
+    env_var["LC_ALL"] = "C"
     if usefile:
         env_var["CIB_file"] = filename
 
@@ -931,6 +937,7 @@ def cmd_runner():
     if usefile:
         env_vars["CIB_file"] = filename
     env_vars.update(os.environ)
+    env_vars["LC_ALL"] = "C"
     return CommandRunner(
         logging.getLogger("old_cli"),
         get_report_processor(),
@@ -1112,16 +1119,16 @@ def create_task_list(report, action, node_list, *args, **kwargs):
     ]
 
 def parallel_for_nodes(action, node_list, *args, **kwargs):
-    error_list = []
+    node_errors = dict()
     def report(node, returncode, output):
         message = '{0}: {1}'.format(node, output.strip())
         print(message)
         if returncode != 0:
-            error_list.append(message)
+            node_errors[node] = message
     run_parallel(
         create_task_list(report, action, node_list, *args, **kwargs)
     )
-    return error_list
+    return node_errors
 
 def prepare_node_name(node, pm_nodes, cs_nodes):
     '''
@@ -1470,11 +1477,6 @@ def resource_running_on(resource, passed_state=None, stopped=False):
         "nodes_slave": nodes_slave,
     }
 
-def does_resource_have_options(ra_type):
-    if ra_type.startswith("ocf:") or ra_type.startswith("stonith:") or ra_type.find(':') == -1:
-        return True
-    return False
-
 def filter_default_op_from_actions(resource_actions):
     filtered = []
     for action in resource_actions:
@@ -1482,7 +1484,7 @@ def filter_default_op_from_actions(resource_actions):
             continue
         new_action = dict([
             (name, value)
-            for name, value in sorted(action.items())
+            for name, value in action.items()
             if name != "depth"
         ])
         filtered.append(new_action)
@@ -1490,13 +1492,20 @@ def filter_default_op_from_actions(resource_actions):
 
 # Given a resource agent (ocf:heartbeat:XXX) return an list of default
 # operations or an empty list if unable to find any default operations
-def get_default_op_values(ra_type):
+def get_default_op_values(full_agent_name):
     default_ops = []
     try:
-        metadata = lib_ra.get_resource_agent_metadata(cmd_runner(), ra_type)
-        actions = filter_default_op_from_actions(
-            lib_ra.get_agent_actions(metadata)
-        )
+        if full_agent_name.startswith("stonith:"):
+            metadata = lib_ra.StonithAgent(
+                cmd_runner(),
+                full_agent_name[len("stonith:"):]
+            )
+        else:
+            metadata = lib_ra.ResourceAgent(
+                cmd_runner(),
+                full_agent_name
+            )
+        actions = filter_default_op_from_actions(metadata.get_actions())
 
         for action in actions:
             op = [action["name"]]
@@ -1504,15 +1513,11 @@ def get_default_op_values(ra_type):
                 if key != "name" and action[key] != "0":
                     op.append("{0}={1}".format(key, action[key]))
             default_ops.append(op)
-    except (
-        lib_ra.UnsupportedResourceAgent,
-        lib_ra.AgentNotFound,
-        lib_ra.UnableToGetAgentMetadata
-    ):
+    except lib_ra.UnableToGetAgentMetadata:
         return []
-    except lib_ra.ResourceAgentLibError as e:
+    except lib_ra.ResourceAgentError as e:
         process_library_reports(
-            [lib_ra.resource_agent_lib_error_to_report_item(e)]
+            [lib_ra.resource_agent_error_to_report_item(e)]
         )
     except LibraryError as e:
         process_library_reports(e.args)
@@ -1541,67 +1546,6 @@ def validate_wait_get_timeout(need_cib_support=True):
     return wait_timeout
 
 
-def is_file_abs_path(path):
-    return path == os.path.abspath(path) and os.path.isfile(path)
-
-# Check and see if the specified resource (or stonith) type is present on the
-# file system and properly responds to a meta-data request
-def is_valid_resource(resource, caseInsensitiveCheck=False):
-    try:
-        if resource.startswith("stonith:"):
-            lib_ra.get_fence_agent_metadata(
-                cmd_runner(), resource.split("stonith:", 1)[1]
-            )
-        else:
-            lib_ra.get_resource_agent_metadata(cmd_runner(), resource)
-        # return True if no exception was raised
-        return True
-    except lib_ra.UnsupportedResourceAgent:
-        pass
-    except (lib_ra.ResourceAgentLibError, LibraryError):
-        # agent not exists or obtaining metadata failed
-        return False
-
-    if resource.startswith("lsb:"):
-        agent = os.path.join("/etc/init.d/", resource.split(":", 1)[1])
-        return is_file_abs_path(agent)
-    elif resource.startswith("systemd:"):
-        _, agent_name = resource.split(":", 1)
-        agent1 = os.path.join(
-            "/etc/systemd/system/", agent_name + ".service"
-        )
-        agent2 = os.path.join(
-            "/usr/lib/systemd/system/", agent_name + ".service"
-        )
-        return is_file_abs_path(agent1) or is_file_abs_path(agent2)
-
-    # resource name is not full, maybe it's ocf resource
-    for provider in sorted(os.listdir(settings.ocf_resources)):
-        provider_path = os.path.join(settings.ocf_resources, provider)
-        if caseInsensitiveCheck:
-            if os.path.isdir(provider_path):
-                for f in os.listdir(provider_path):
-                    if (
-                        f.lower() == resource.lower() and
-                        os.path.isfile(os.path.join(provider_path, f))
-                    ):
-                        return "ocf:{0}:{1}".format(provider, f)
-                continue
-
-        if os.path.exists(
-            os.path.join(settings.ocf_resources, provider, resource)
-        ):
-            try:
-                lib_ra.get_resource_agent_metadata(
-                    cmd_runner(),
-                    "ocf:{0}:{1}".format(provider, resource)
-                )
-                return True
-            except (LibraryError, lib_ra.ResourceAgentLibError):
-                continue
-    return False
-
-
 # Return matches from the CIB with the xpath_query
 def get_cib_xpath(xpath_query):
     args = ["cibadmin", "-Q", "--xpath", xpath_query]
@@ -1941,7 +1885,7 @@ def stonithCheck():
     if not usefile:
         # check if SBD daemon is running
         try:
-            if is_service_running(cmd_runner(), "sbd"):
+            if is_service_running(cmd_runner(), sbd.get_sbd_service_name()):
                 return False
         except LibraryError:
             pass
@@ -2112,7 +2056,7 @@ def validate_xml_id(var, description="id"):
     try:
         validate_id(var, description)
     except LibraryError as e:
-        return False, e.args[0].message
+        return False, build_report_message(e.args[0])
     return True, ""
 
 def is_iso8601_date(var):
@@ -2150,9 +2094,12 @@ def verify_cert_key_pair(cert, key):
     return errors
 
 
- at simple_cache
 def is_rhel6():
-    return is_cman_cluster(cmd_runner())
+    return is_cman_cluster()
+
+ at simple_cache
+def is_cman_cluster():
+    return lib_is_cman_cluster(cmd_runner())
 
 def err(errorText, exit_after_error=True):
     sys.stderr.write("Error: %s\n" % errorText)
@@ -2161,19 +2108,18 @@ def err(errorText, exit_after_error=True):
 
 
 def serviceStatus(prefix):
-    if not is_systemctl():
-        return
     print("Daemon Status:")
     service_def = [
         # (
         #     service name,
         #     display even if not enabled nor running
         # )
+        ("cman", False),
         ("corosync", True),
         ("pacemaker", True),
         ("pacemaker_remote", False),
         ("pcsd", True),
-        ("sbd", False),
+        (sbd.get_sbd_service_name(), False),
     ]
     for service, display_always in service_def:
         try:
@@ -2228,6 +2174,22 @@ def disableServices():
     if report_item_list:
         raise LibraryError(*report_item_list)
 
+def start_service(service):
+    if is_systemctl():
+        stdout, stderr, retval = cmd_runner().run([
+            _systemctl, "start", service
+        ])
+    else:
+        stdout, stderr, retval = cmd_runner().run([_service, service, "start"])
+    return join_multilines([stderr, stdout]), retval
+
+def stop_service(service):
+    if is_systemctl():
+        stdout, stderr, retval = cmd_runner().run([_systemctl, "stop", service])
+    else:
+        stdout, stderr, retval = cmd_runner().run([_service, service, "stop"])
+    return join_multilines([stderr, stdout]), retval
+
 def write_file(path, data, permissions=0o644, binary=False):
     if os.path.exists(path):
         if "--force" not in pcs_options:
@@ -2730,19 +2692,6 @@ def get_cluster_property_from_xml(etree_el):
         property["longdesc"] = ""
     return property
 
-# DEPRECATED use lxml version available in pcs.lib.cib.tools
-def get_acls(dom):
-    acls = dom.getElementsByTagName("acls")
-    if len(acls) == 0:
-        acls = dom.createElement("acls")
-        conf = dom.getElementsByTagName("configuration")
-        if len(conf) == 0:
-            err("Unable to get configuration section of cib")
-        conf[0].appendChild(acls)
-    else:
-        acls = acls[0]
-    return acls
-
 def get_lib_env():
     user = None
     groups = None
@@ -2806,7 +2755,10 @@ def get_middleware_factory():
             pcs_options.get("--name", DEFAULT_BOOTH_NAME),
             pcs_options.get("--booth-conf", None),
             pcs_options.get("--booth-key", None),
-        )
+        ),
+        cluster_conf_read_only=middleware.cluster_conf_read_only(
+            pcs_options.get("--cluster_conf", None)
+        ),
     )
 
 def get_library_wrapper():
@@ -2822,7 +2774,9 @@ def get_modificators():
     #commands is not an issue
     return {
         "autocorrect": "--autocorrect" in pcs_options,
+        "autodelete": "--autodelete" in pcs_options,
         "corosync_conf": pcs_options.get("--corosync_conf", None),
+        "describe": "--nodesc" not in pcs_options,
         "enable": "--enable" in pcs_options,
         "force": "--force" in pcs_options,
         "full": "--full" in pcs_options,
diff --git a/pcsd/Makefile b/pcsd/Makefile
index 9a4a4ba..e5ee6de 100644
--- a/pcsd/Makefile
+++ b/pcsd/Makefile
@@ -8,7 +8,7 @@ build_gems_rhel6:
 	gem install --verbose --no-rdoc --no-ri -l -i vendor/bundle/ruby \
 	vendor/cache/backports-3.6.8.gem \
 	vendor/cache/json-1.8.3.gem \
-	vendor/cache/multi_json-1.12.1.gem \
+	vendor/cache/multi_json-1.12.0.gem \
 	vendor/cache/open4-1.3.4.gem \
 	vendor/cache/orderedhash-0.0.6.gem \
 	vendor/cache/rack-1.6.4.gem \
diff --git a/pcsd/auth.rb b/pcsd/auth.rb
index 18f934b..e87f5a3 100644
--- a/pcsd/auth.rb
+++ b/pcsd/auth.rb
@@ -1,5 +1,6 @@
 require 'json'
 require 'securerandom'
+gem 'rpam-ruby19'
 require 'rpam'
 require 'base64'
 
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index 193431b..f944d20 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -27,6 +27,7 @@ def is_systemctl()
       '/usr/bin/systemctl',
       '/bin/systemctl',
       '/var/run/systemd/system',
+      '/run/systemd/system',
   ]
   systemctl_paths.each { |path|
     return true if File.exist?(path)
@@ -43,7 +44,7 @@ def get_pcs_path(pcsd_path)
   end
 end
 
-PCS_VERSION = '0.9.154'
+PCS_VERSION = '0.9.155'
 COROSYNC = COROSYNC_BINARIES + "corosync"
 ISRHEL6 = is_rhel6
 ISSYSTEMCTL = is_systemctl
diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
index b8f363a..22850e3 100644
--- a/pcsd/cluster_entity.rb
+++ b/pcsd/cluster_entity.rb
@@ -1034,11 +1034,7 @@ module ClusterEntity
       node.services.each do |service, info|
         info[:running] = is_service_running?(service.to_s)
         info[:enabled] = is_service_enabled?(service.to_s)
-        if ISSYSTEMCTL
-          # temporary solution
-          # is_service_installed is implemented only for systemd systems
-          info[:installed] = is_service_installed?(service.to_s)
-        end
+        info[:installed] = is_service_installed?(service.to_s)
       end
       node.corosync = node.services[:corosync][:running]
       node.corosync_enabled = node.services[:corosync][:enabled]
diff --git a/pcsd/fenceagent.rb b/pcsd/fenceagent.rb
index 28c5980..02cb7a8 100644
--- a/pcsd/fenceagent.rb
+++ b/pcsd/fenceagent.rb
@@ -1,13 +1,21 @@
-def getFenceAgents()
+def getFenceAgents(auth_user)
   fence_agent_list = {}
-  agents = Dir.glob('/usr/sbin/fence_' + '*')
+  stdout, stderr, retval = run_cmd(
+    auth_user, PCS, "stonith", "list", "--nodesc"
+  )
+  if retval != 0
+    $logger.error("Error running 'pcs stonith list --nodesc'")
+    $logger.error(stdout + stderr)
+    return {}
+  end
+
+  agents = stdout
   agents.each { |a|
     fa = FenceAgent.new
-    fa.name =  a.sub(/.*\//,"")
-    next if fa.name == "fence_ack_manual"
+    fa.name = a.chomp
     fence_agent_list[fa.name] = fa
   }
-  fence_agent_list
+  return fence_agent_list
 end
 
 class FenceAgent
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index ddb7322..11d3b2b 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -248,12 +248,14 @@ def add_acl_usergroup(auth_user, acl_role_id, user_group, name)
     if retval == 0
       return ""
     end
-    if not /^error: (user|group) #{name.to_s} already exists$/i.match(stderr.join("\n").strip)
+    $logger.info(stdout)
+    if not /^Error: '#{name.to_s}' already exists$/i.match(stderr.join("\n").strip)
       return stderr.join("\n").strip
     end
   end
   stdout, stderror, retval = run_cmd(
-    auth_user, PCS, "acl", "role", "assign", acl_role_id.to_s, name.to_s
+    auth_user, PCS, "acl", "role", "assign",
+    acl_role_id.to_s, user_group, name.to_s
   )
   if retval != 0
     if stderror.empty?
@@ -279,11 +281,18 @@ def remove_acl_permission(auth_user, acl_perm_id)
   return ""
 end
 
-def remove_acl_usergroup(auth_user, role_id, usergroup_id)
-  stdout, stderror, retval = run_cmd(
-    auth_user, PCS, "acl", "role", "unassign", role_id.to_s, usergroup_id.to_s,
-    "--autodelete"
-  )
+def remove_acl_usergroup(auth_user, role_id, usergroup_id, user_or_group)
+  if ['user', 'group'].include?(user_or_group)
+    stdout, stderror, retval = run_cmd(
+      auth_user, PCS, "acl", "role", "unassign", role_id.to_s, user_or_group,
+      usergroup_id.to_s, "--autodelete"
+    )
+  else
+    stdout, stderror, retval = run_cmd(
+      auth_user, PCS, "acl", "role", "unassign", role_id.to_s,
+      usergroup_id.to_s, "--autodelete"
+    )
+  end
   if retval != 0
     if stderror.empty?
       return "Error removing user / group"
@@ -994,6 +1003,7 @@ def run_cmd_options(auth_user, options, *args)
     ps_write.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
     ENV['CIB_user'] = cib_user
     ENV['CIB_user_groups'] = cib_groups
+    ENV['LC_ALL'] = 'C'
     exec(*args)
   }
 
@@ -1718,6 +1728,7 @@ def get_node_status(auth_user, cib_dom)
       :fence_levels => get_fence_levels(auth_user, cib_dom),
       :node_attr => node_attrs_to_v2(get_node_attributes(auth_user, cib_dom)),
       :nodes_utilization => get_nodes_utilization(cib_dom),
+      :alerts => get_alerts(auth_user),
       :known_nodes => [],
       :available_features => [
         'constraint_colocation_set',
@@ -1725,6 +1736,7 @@ def get_node_status(auth_user, cib_dom)
         'ticket_constraints',
         'moving_resource_in_group',
         'unmanaged_resource',
+        'alerts',
       ]
   }
 
@@ -1983,17 +1995,18 @@ def is_service_installed?(service)
       if line.split(' ')[0] == service
         return true
       end
-      return false
     }
+    return false
   end
+
   stdout, _, retcode = run_cmd(
-    PCSAuth.getSuperuserAuth(), PCS, 'resource', 'list', 'systemd'
+    PCSAuth.getSuperuserAuth(), 'systemctl', 'list-unit-files', '--full'
   )
   if retcode != 0
     return nil
   end
   stdout.each { |line|
-    if line.strip() == "systemd:#{service}"
+    if line.strip().start_with?("#{service}.service")
       return true
     end
   }
@@ -2028,9 +2041,12 @@ def disable_service(service)
 end
 
 def start_service(service)
-  _, _, retcode = run_cmd(
-    PCSAuth.getSuperuserAuth(), "service", service, "start"
-  )
+  if ISSYSTEMCTL
+    cmd = ['systemctl', 'start', "#{service}.service"]
+  else
+    cmd = ['service', service, 'start']
+  end
+  _, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), *cmd)
   return (retcode == 0)
 end
 
@@ -2038,9 +2054,12 @@ def stop_service(service)
   if not is_service_installed?(service)
     return true
   end
-  _, _, retcode = run_cmd(
-    PCSAuth.getSuperuserAuth(), "service", service, "stop"
-  )
+  if ISSYSTEMCTL
+    cmd = ['systemctl', 'stop', "#{service}.service"]
+  else
+    cmd = ['service', service, 'stop']
+  end
+  _, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), *cmd)
   return (retcode == 0)
 end
 
@@ -2068,6 +2087,14 @@ def get_parsed_local_sbd_config()
   end
 end
 
+def get_sbd_service_name()
+  if ISSYSTEMCTL
+    return 'sbd'
+  else
+    return 'sbd_helper'
+  end
+end
+
 def write_booth_config(config, data)
   if config.include?('/')
     raise InvalidFileNameException.new(config)
@@ -2116,3 +2143,17 @@ def get_authfile_from_booth_config(config_data)
   }
   return authfile_path
 end
+
+def get_alerts(auth_user)
+  out, _, retcode = run_cmd(auth_user, PCS, 'alert', 'get_all_alerts')
+
+  if retcode !=  0
+    return nil
+  end
+
+  begin
+    return JSON.parse(out.join(""))
+  rescue JSON::ParserError
+    return nil
+  end
+end
diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
index 360fa1b..7f8d429 100644
--- a/pcsd/public/js/pcsd.js
+++ b/pcsd/public/js/pcsd.js
@@ -1808,8 +1808,10 @@ function remove_acl_item(id,item) {
       data["acl_perm_id"] = id.attr("acl_perm_id");
       item_label = "permission"
       break;
-    case "usergroup":
+    case "group":
+    case "user":
       data["item"] = "usergroup";
+      data["item_type"] = item;
       data["usergroup_id"] = id.attr("usergroup_id")
       data["role_id"] = id.attr("role_id")
       item_label = "user / group"
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index 97e63f1..0b2c674 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -123,6 +123,11 @@ def remote(params, request, auth_user)
       :get_fence_agent_metadata => method(:get_fence_agent_metadata),
       :manage_resource => method(:manage_resource),
       :unmanage_resource => method(:unmanage_resource),
+      :create_alert => method(:create_alert),
+      :update_alert => method(:update_alert),
+      :create_recipient => method(:create_recipient),
+      :update_recipient => method(:update_recipient),
+      :remove_alerts_and_recipients => method("remove_alerts_and_recipients"),
   }
 
   command = params[:command].to_sym
@@ -1548,7 +1553,7 @@ def get_avail_fence_agents(params, request, auth_user)
   if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
-  agents = getFenceAgents()
+  agents = getFenceAgents(auth_user)
   return JSON.generate(agents)
 end
 
@@ -1740,7 +1745,7 @@ def remove_acl_remote(params, request, auth_user)
     retval = remove_acl_permission(auth_user, params["acl_perm_id"])
   elsif params["item"] == "usergroup"
     retval = remove_acl_usergroup(
-      auth_user, params["role_id"],params["usergroup_id"]
+      auth_user, params["role_id"],params["usergroup_id"], params["item_type"]
     )
   else
     retval = "Error: Unknown removal request"
@@ -2306,9 +2311,9 @@ def check_sbd(param, request, auth_user)
   end
   out = {
     :sbd => {
-      :installed => is_service_installed?('sbd'),
-      :enabled => is_service_enabled?('sbd'),
-      :running => is_service_running?('sbd')
+      :installed => is_service_installed?(get_sbd_service_name()),
+      :enabled => is_service_enabled?(get_sbd_service_name()),
+      :running => is_service_running?(get_sbd_service_name())
     }
   }
   watchdog = param[:watchdog]
@@ -2377,7 +2382,7 @@ def sbd_disable(param, request, auth_user)
   unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
-  if disable_service('sbd')
+  if disable_service(get_sbd_service_name())
     msg = 'SBD disabled'
     $logger.info(msg)
     return [200, msg]
@@ -2392,7 +2397,7 @@ def sbd_enable(param, request, auth_user)
   unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
   end
-  if enable_service('sbd')
+  if enable_service(get_sbd_service_name())
     msg = 'SBD enabled'
     $logger.info(msg)
     return [200, msg]
@@ -2819,3 +2824,142 @@ def booth_get_config(params, request, auth_user)
     return [400, "Unable to read booth config/key file: #{e.message}"]
   end
 end
+
+def _hash_to_argument_list(hash)
+  result = []
+  if hash.kind_of?(Hash)
+    hash.each {|key, value|
+      value = '' if value.nil?
+      result << "#{key}=#{value}"
+    }
+  end
+  return result
+end
+
+def create_alert(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  path = params[:path]
+  unless path
+    return [400, 'Missing required parameter: path']
+  end
+  alert_id = params[:alert_id]
+  description = params[:description]
+  meta_attr_list = _hash_to_argument_list(params[:meta_attr])
+  instance_attr_list = _hash_to_argument_list(params[:instance_attr])
+  cmd = [PCS, 'alert', 'create', "path=#{path}"]
+  cmd << "id=#{alert_id}" if alert_id and alert_id != ''
+  cmd << "description=#{description}" if description and description != ''
+  cmd += ['options', *instance_attr_list] if instance_attr_list.any?
+  cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
+  output, stderr, retval = run_cmd(auth_user, *cmd)
+  if retval != 0
+    return [400, "Unable to create alert: #{stderr.join("\n")}"]
+  end
+  return [200, 'Alert created']
+end
+
+def update_alert(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  alert_id = params[:alert_id]
+  unless alert_id
+    return [400, 'Missing required parameter: alert_id']
+  end
+  path = params[:path]
+  description = params[:description]
+  meta_attr_list = _hash_to_argument_list(params[:meta_attr])
+  instance_attr_list = _hash_to_argument_list(params[:instance_attr])
+  cmd = [PCS, 'alert', 'update', alert_id]
+  cmd << "path=#{path}" if path
+  cmd << "description=#{description}" if description
+  cmd += ['options', *instance_attr_list] if instance_attr_list.any?
+  cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
+  output, stderr, retval = run_cmd(auth_user, *cmd)
+  if retval != 0
+    return [400, "Unable to update alert: #{stderr.join("\n")}"]
+  end
+  return [200, 'Alert updated']
+end
+
+def remove_alerts_and_recipients(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  alert_list = params[:alert_list]
+  recipient_list = params[:recipient_list]
+  if recipient_list.kind_of?(Array) and recipient_list.any?
+    output, stderr, retval = run_cmd(
+      auth_user, PCS, 'alert', 'recipient', 'remove', *recipient_list
+    )
+    if retval != 0
+      return [400, "Unable to remove recipients: #{stderr.join("\n")}"]
+    end
+  end
+  if alert_list.kind_of?(Array) and alert_list.any?
+    output, stderr, retval = run_cmd(
+      auth_user, PCS, 'alert', 'remove', *alert_list
+    )
+    if retval != 0
+      return [400, "Unable to remove alerts: #{stderr.join("\n")}"]
+    end
+  end
+  return [200, 'All removed']
+end
+
+def create_recipient(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  alert_id = params[:alert_id]
+  if not alert_id or alert_id.strip! == ''
+    return [400, 'Missing required parameter: alert_id']
+  end
+  value = params[:value]
+  if not value or value == ''
+    return [400, 'Missing required parameter: value']
+  end
+  recipient_id = params[:recipient_id]
+  description = params[:description]
+  meta_attr_list = _hash_to_argument_list(params[:meta_attr])
+  instance_attr_list = _hash_to_argument_list(params[:instance_attr])
+  cmd = [PCS, 'alert', 'recipient', 'add', alert_id, "value=#{value}"]
+  cmd << "id=#{recipient_id}" if recipient_id and recipient_id != ''
+  cmd << "description=#{description}" if description and description != ''
+  cmd += ['options', *instance_attr_list] if instance_attr_list.any?
+  cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
+  output, stderr, retval = run_cmd(auth_user, *cmd)
+  if retval != 0
+    return [400, "Unable to create recipient: #{stderr.join("\n")}"]
+  end
+  return [200, 'Recipient created']
+end
+
+def update_recipient(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  recipient_id = params[:recipient_id]
+  if not recipient_id or recipient_id.strip! == ''
+    return [400, 'Missing required parameter: recipient_id']
+  end
+  value = params[:value]
+  if value and value.strip! == ''
+    return [400, 'Parameter value cannot be empty string']
+  end
+  description = params[:description]
+  meta_attr_list = _hash_to_argument_list(params[:meta_attr])
+  instance_attr_list = _hash_to_argument_list(params[:instance_attr])
+  cmd = [PCS, 'alert', 'recipient', 'update', recipient_id]
+  cmd << "value=#{value}" if value
+  cmd << "description=#{description}" if description
+  cmd += ['options', *instance_attr_list] if instance_attr_list.any?
+  cmd += ['meta', *meta_attr_list] if meta_attr_list.any?
+  output, stderr, retval = run_cmd(auth_user, *cmd)
+  if retval != 0
+    return [400, "Unable to update recipient: #{stderr.join("\n")}"]
+  end
+  return [200, 'Recipient updated']
+end
diff --git a/pcsd/settings.rb b/pcsd/settings.rb
index e702585..e0b1f8b 100644
--- a/pcsd/settings.rb
+++ b/pcsd/settings.rb
@@ -5,10 +5,6 @@ CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt'
 KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
 COOKIE_FILE = PCSD_VAR_LOCATION + 'pcsd.cookiesecret'
 
-OCF_ROOT = "/usr/lib/ocf"
-HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/"
-PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/"
-NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/'
 PENGINE = "/usr/libexec/pacemaker/pengine"
 CIB_BINARY = '/usr/libexec/pacemaker/cib'
 CRM_MON = "/usr/sbin/crm_mon"
diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian
index 27202e8..e3f2192 100644
--- a/pcsd/settings.rb.debian
+++ b/pcsd/settings.rb.debian
@@ -5,10 +5,6 @@ CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt'
 KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
 COOKIE_FILE = PCSD_VAR_LOCATION + 'pcsd.cookiesecret'
 
-OCF_ROOT = "/usr/lib/ocf"
-HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/"
-PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/"
-NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/'
 PENGINE = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/pengine"
 CIB_BINARY = '/usr/lib/DEB_HOST_MULTIARCH/pacemaker/cib'
 CRM_MON = "/usr/sbin/crm_mon"
@@ -20,6 +16,7 @@ PACEMAKERD = "/usr/sbin/pacemakerd"
 CIBADMIN = "/usr/sbin/cibadmin"
 SBD_CONFIG = "/etc/default/sbd"
 CIB_PATH = "/var/lib/pacemaker/cib/cib.xml"
+BOOTH_CONFIG_DIR='/etc/booth'
 
 COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb"
 COROSYNC_QDEVICE_NET_SERVER_CA_FILE = (
diff --git a/pcsd/views/_acls.erb b/pcsd/views/_acls.erb
index 98bb356..e3da01c 100644
--- a/pcsd/views/_acls.erb
+++ b/pcsd/views/_acls.erb
@@ -111,7 +111,7 @@
               <tr {{bind-attr usergroup_id="user"}} {{bind-attr role_id="Pcs.aclsController.cur_role.name"}}>
                   <td>{{user}}</td>
                   <td style="text-align:center;">
-                    <a onclick="remove_acl_item($(this).closest('tr'),'usergroup');return false;" href="#" class="remove">X</a>
+                    <a onclick="remove_acl_item($(this).closest('tr'),'user');return false;" href="#" class="remove">X</a>
                   </td>
                 </tr>
               {{/each}}
@@ -137,7 +137,7 @@
               <tr {{bind-attr usergroup_id="group"}} {{bind-attr role_id="Pcs.aclsController.cur_role.name"}}>
                 <td>{{group}}</td>
                 <td style="text-align:center;">
-                  <a onclick="remove_acl_item($(this).closest('tr'),'usergroup');return false;" href="#" class="remove">X</a>
+                  <a onclick="remove_acl_item($(this).closest('tr'),'group');return false;" href="#" class="remove">X</a>
                 </td>
               </tr>
               {{/each}}
diff --git a/.pylintrc b/pylintrc
similarity index 99%
rename from .pylintrc
rename to pylintrc
index 6101381..1b26fa1 100644
--- a/.pylintrc
+++ b/pylintrc
@@ -92,7 +92,7 @@ dummy-variables-rgx=_$|dummy
 
 [FORMAT]
 # Maximum number of lines in a module
-max-module-lines=4616
+max-module-lines=4930
 # Maximum number of characters on a single line.
 max-line-length=1291
 
diff --git a/setup.py b/setup.py
index f698f2f..8def987 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ class CleanCommand(Command):
 
 setup(
     name='pcs',
-    version='0.9.154',
+    version='0.9.155',
     description='Pacemaker Configuration System',
     author='Chris Feist',
     author_email='cfeist at redhat.com',

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git



More information about the Debian-HA-Commits mailing list