[Debian-ha-commits] [pcs] 01/05: New upstream version 0.9.154

Valentin Vidic vvidic-guest at moszumanska.debian.org
Mon Sep 26 18:16:08 UTC 2016


This is an automated email from the git hooks/post-receive script.

vvidic-guest pushed a commit to branch master
in repository pcs.

commit 0ae7fe416b4adf3521370fbf38c819d92c9e3b6c
Author: Valentin Vidic <Valentin.Vidic at CARNet.hr>
Date:   Mon Sep 26 17:31:19 2016 +0200

    New upstream version 0.9.154
---
 .pylintrc                                          |    2 +-
 Makefile                                           |    2 +-
 pcs/acl.py                                         |    2 +-
 pcs/alert.py                                       |   57 +-
 pcs/app.py                                         |    7 +
 pcs/booth.py                                       |   78 ++
 pcs/cli/booth/__init__.py                          |    0
 pcs/cli/booth/command.py                           |  195 ++++
 pcs/cli/booth/env.py                               |  121 ++
 pcs/cli/booth/test/__init__.py                     |    0
 pcs/cli/booth/test/test_command.py                 |   54 +
 pcs/cli/booth/test/test_env.py                     |  118 ++
 pcs/cli/common/console_report.py                   |   13 +-
 pcs/cli/common/env.py                              |    2 +
 pcs/cli/common/lib_wrapper.py                      |   80 +-
 pcs/cli/common/middleware.py                       |    9 +-
 pcs/cli/common/parse_args.py                       |   27 +
 pcs/cli/common/test/test_completion.py             |    2 +-
 pcs/cli/common/test/test_console_report.py         |    2 +-
 pcs/cli/common/test/test_lib_wrapper.py            |   32 +-
 pcs/cli/common/test/test_middleware.py             |    8 +-
 pcs/cli/common/test/test_parse_args.py             |   86 +-
 pcs/cli/constraint/test/test_command.py            |    4 +-
 pcs/cli/constraint/test/test_console_report.py     |    2 +-
 pcs/cli/constraint/test/test_parse_args.py         |    9 +-
 pcs/cli/constraint_all/test/test_console_report.py |    4 +-
 pcs/cli/constraint_ticket/command.py               |    8 +
 pcs/cli/constraint_ticket/test/test_command.py     |   26 +-
 .../constraint_ticket/test/test_console_report.py  |    2 +-
 pcs/cli/constraint_ticket/test/test_parse_args.py  |    2 +-
 pcs/cluster.py                                     |  239 ++--
 pcs/common/env_file_role_codes.py                  |    9 +
 pcs/common/report_codes.py                         |   50 +-
 pcs/common/test/__init__.py                        |    0
 pcs/common/tools.py                                |    8 +
 pcs/config.py                                      |  111 +-
 pcs/constraint.py                                  |    3 +-
 pcs/lib/booth/__init__.py                          |    0
 pcs/lib/booth/config_exchange.py                   |   28 +
 pcs/lib/booth/config_files.py                      |  104 ++
 pcs/lib/booth/config_parser.py                     |   91 ++
 pcs/lib/booth/config_structure.py                  |  161 +++
 pcs/lib/booth/env.py                               |  149 +++
 pcs/lib/booth/reports.py                           |  418 +++++++
 pcs/lib/booth/resource.py                          |   97 ++
 pcs/lib/booth/status.py                            |   50 +
 pcs/lib/booth/sync.py                              |  210 ++++
 pcs/lib/booth/test/__init__.py                     |    0
 pcs/lib/booth/test/test_config_exchange.py         |   58 +
 pcs/lib/booth/test/test_config_files.py            |  288 +++++
 pcs/lib/booth/test/test_config_parser.py           |  171 +++
 pcs/lib/booth/test/test_config_structure.py        |  369 ++++++
 pcs/lib/booth/test/test_env.py                     |  225 ++++
 pcs/lib/booth/test/test_resource.py                |  190 +++
 pcs/lib/booth/test/test_status.py                  |  137 +++
 pcs/lib/booth/test/test_sync.py                    | 1215 ++++++++++++++++++++
 pcs/lib/cib/alert.py                               |  129 ++-
 pcs/lib/cib/constraint/ticket.py                   |   33 +-
 pcs/lib/cib/test/test_alert.py                     |  453 +++++++-
 pcs/lib/cib/test/test_constraint.py                |    4 +-
 pcs/lib/cib/test/test_constraint_colocation.py     |    4 +-
 pcs/lib/cib/test/test_constraint_order.py          |    4 +-
 pcs/lib/cib/test/test_constraint_ticket.py         |  138 ++-
 pcs/lib/cib/test/test_nvpair.py                    |    2 +-
 pcs/lib/cib/test/test_resource.py                  |    2 +-
 pcs/lib/cib/test/test_resource_set.py              |    4 +-
 pcs/lib/cib/tools.py                               |   59 +-
 pcs/lib/commands/alert.py                          |   45 +-
 pcs/lib/commands/booth.py                          |  383 ++++++
 pcs/lib/commands/constraint/ticket.py              |   23 +
 pcs/lib/commands/qdevice.py                        |   53 +-
 pcs/lib/commands/quorum.py                         |   54 +-
 pcs/lib/commands/sbd.py                            |   31 +-
 pcs/lib/commands/test/test_alert.py                |  115 +-
 pcs/lib/commands/test/test_booth.py                |  614 ++++++++++
 pcs/lib/commands/test/test_constraint_common.py    |    4 +-
 pcs/lib/commands/test/test_ticket.py               |   37 +-
 pcs/lib/corosync/config_facade.py                  |   15 +-
 pcs/lib/corosync/live.py                           |   29 +-
 pcs/lib/corosync/qdevice_client.py                 |    9 +-
 pcs/lib/corosync/qdevice_net.py                    |  105 +-
 pcs/lib/env.py                                     |   44 +-
 pcs/lib/env_file.py                                |  122 ++
 pcs/lib/errors.py                                  |   14 +
 pcs/lib/external.py                                |  165 ++-
 pcs/lib/pacemaker.py                               |   71 +-
 pcs/lib/reports.py                                 |  393 +++++--
 pcs/lib/resource_agent.py                          |   31 +-
 pcs/lib/sbd.py                                     |  111 +-
 pcs/lib/test/misc.py                               |   20 +
 pcs/lib/test/test_env_file.py                      |  187 +++
 pcs/lib/test/test_errors.py                        |   20 +
 pcs/lib/test/test_pacemaker_values.py              |    2 +-
 pcs/node.py                                        |  137 ++-
 pcs/pcs.8                                          |  122 +-
 pcs/prop.py                                        |   39 +-
 pcs/qdevice.py                                     |    4 +-
 pcs/quorum.py                                      |   69 +-
 pcs/resource.py                                    |  213 ++--
 pcs/settings.py.debian                             |    1 +
 pcs/settings_default.py                            |    4 +-
 pcs/status.py                                      |   62 +-
 pcs/stonith.py                                     |   23 +-
 pcs/test/resources/.gitignore                      |    1 +
 pcs/test/resources/corosync-qdevice.conf           |   34 +
 pcs/test/resources/tmp_keyfile                     |    1 +
 pcs/test/suite.py                                  |   16 +-
 pcs/test/test_acl.py                               |    2 +-
 pcs/test/test_alert.py                             |  195 +++-
 pcs/test/test_booth.py                             |  420 +++++++
 pcs/test/test_cluster.py                           |  582 ++++++----
 pcs/test/test_common_tools.py                      |   34 +-
 pcs/test/test_constraints.py                       |   46 +-
 pcs/test/test_lib_cib_acl.py                       |    2 +-
 pcs/test/test_lib_cib_tools.py                     |  142 ++-
 pcs/test/test_lib_commands_qdevice.py              |  159 ++-
 pcs/test/test_lib_commands_quorum.py               |  314 ++++-
 pcs/test/test_lib_commands_sbd.py                  |  138 ++-
 pcs/test/test_lib_corosync_config_facade.py        |   30 +-
 pcs/test/test_lib_corosync_config_parser.py        |    2 +-
 pcs/test/test_lib_corosync_live.py                 |   34 +-
 pcs/test/test_lib_corosync_qdevice_client.py       |   12 +-
 pcs/test/test_lib_corosync_qdevice_net.py          |  114 +-
 pcs/test/test_lib_env.py                           |    4 +-
 pcs/test/test_lib_external.py                      |  261 +++--
 pcs/test/test_lib_node.py                          |    2 +-
 pcs/test/test_lib_nodes_task.py                    |    4 +-
 pcs/test/test_lib_pacemaker.py                     |  363 ++++--
 pcs/test/test_lib_pacemaker_state.py               |    2 +-
 pcs/test/test_lib_resource_agent.py                |   43 +-
 pcs/test/test_lib_sbd.py                           |  141 ++-
 pcs/test/test_lib_tools.py                         |    2 +-
 pcs/test/test_node.py                              |  342 +++++-
 pcs/test/test_properties.py                        |  265 ++++-
 pcs/test/test_quorum.py                            |    2 +-
 pcs/test/test_resource.py                          |   59 +-
 pcs/test/test_rule.py                              |    2 +-
 pcs/test/test_stonith.py                           |    2 +-
 pcs/test/test_utils.py                             |   33 +-
 pcs/test/tools/color_text_runner.py                |    9 +-
 pcs/test/tools/misc.py                             |   14 +
 pcs/test/tools/pcs_mock.py                         |   13 -
 pcs/test/tools/pcs_unittest.py                     |  181 +++
 pcs/usage.py                                       |  189 ++-
 pcs/utils.py                                       |  452 +++++---
 pcsd/Gemfile                                       |    1 -
 pcsd/Gemfile.lock                                  |    2 -
 pcsd/Makefile                                      |    1 -
 pcsd/bootstrap.rb                                  |    2 +-
 pcsd/cluster_entity.rb                             |    8 +-
 pcsd/pcs.rb                                        |  205 +++-
 pcsd/pcsd                                          |    2 +
 pcsd/pcsd.debian                                   |    2 +
 pcsd/pcsd.rb                                       |  187 ++-
 pcsd/pcsd.service                                  |    1 +
 pcsd/pcsd.service-runner                           |   25 +-
 pcsd/public/css/style.css                          |   10 +
 pcsd/public/js/nodes-ember.js                      |  251 +++-
 pcsd/public/js/pcsd.js                             |  257 +++--
 pcsd/remote.rb                                     |  284 ++++-
 pcsd/settings.rb                                   |    1 +
 pcsd/settings.rb.debian                            |    2 +-
 pcsd/ssl.rb                                        |   25 +-
 pcsd/views/_dialogs.erb                            |   21 +
 pcsd/views/_resource.erb                           |    8 +-
 pcsd/views/main.erb                                |  165 ++-
 pcsd/views/manage.erb                              |   15 +-
 setup.py                                           |    2 +-
 168 files changed, 13663 insertions(+), 2015 deletions(-)

diff --git a/.pylintrc b/.pylintrc
index e378e6a..6101381 100644
--- a/.pylintrc
+++ b/.pylintrc
@@ -92,7 +92,7 @@ dummy-variables-rgx=_$|dummy
 
 [FORMAT]
 # Maximum number of lines in a module
-max-module-lines=4577
+max-module-lines=4616
 # Maximum number of characters on a single line.
 max-line-length=1291
 
diff --git a/Makefile b/Makefile
index cbbeb85..25fb87d 100644
--- a/Makefile
+++ b/Makefile
@@ -85,7 +85,7 @@ install:
 	$(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS}
 	mkdir -p ${DESTDIR}${PREFIX}/sbin/
 	mv ${DESTDIR}${PREFIX}/bin/pcs ${DESTDIR}${PREFIX}/sbin/pcs
-	install -D pcs/bash_completion.sh ${BASH_COMPLETION_DIR}/pcs
+	install -D -m644 pcs/bash_completion.sh ${BASH_COMPLETION_DIR}/pcs
 	install -m644 -D pcs/pcs.8 ${DESTDIR}/${MANDIR}/man8/pcs.8
 ifeq ($(IS_DEBIAN),true)
   ifeq ($(install_settings),true)
diff --git a/pcs/acl.py b/pcs/acl.py
index 118ceed..0378c10 100644
--- a/pcs/acl.py
+++ b/pcs/acl.py
@@ -55,7 +55,7 @@ def acl_cmd(argv):
 def acl_show(argv):
     dom = utils.get_cib_dom()
 
-    properties = prop.get_set_properties(defaults=prop.get_default_properties())
+    properties = utils.get_set_properties(defaults=prop.get_default_properties())
     acl_enabled = properties.get("enable-acl", "").lower()
     if is_true(acl_enabled):
         print("ACLs are enabled")
diff --git a/pcs/alert.py b/pcs/alert.py
index d3a6e28..17f4e8d 100644
--- a/pcs/alert.py
+++ b/pcs/alert.py
@@ -6,16 +6,18 @@ from __future__ import (
 )
 
 import sys
+from functools import partial
 
 from pcs import (
     usage,
     utils,
 )
 from pcs.cli.common.errors import CmdLineInputError
-from pcs.cli.common.parse_args import prepare_options
+from pcs.cli.common.parse_args import prepare_options, group_by_keywords
 from pcs.cli.common.console_report import indent
 from pcs.lib.errors import LibraryError
 
+parse_cmd_sections = partial(group_by_keywords, implicit_first_keyword="main")
 
 def alert_cmd(*args):
     argv = args[1]
@@ -61,22 +63,14 @@ def recipient_cmd(*args):
             recipient_update(*args)
         elif sub_cmd == "remove":
             recipient_remove(*args)
+        else:
+            raise CmdLineInputError()
     except CmdLineInputError as e:
         utils.exit_on_cmdline_input_errror(
             e, "alert", "recipient {0}".format(sub_cmd)
         )
 
 
-def parse_cmd_sections(arg_list, section_list):
-    output = dict([(section, []) for section in section_list + ["main"]])
-    cur_section = "main"
-    for arg in arg_list:
-        if arg in section_list:
-            cur_section = arg
-            continue
-        output[cur_section].append(arg)
-
-    return output
 
 
 def ensure_only_allowed_options(parameter_dict, allowed_list):
@@ -91,7 +85,7 @@ def alert_add(lib, argv, modifiers):
     if not argv:
         raise CmdLineInputError()
 
-    sections = parse_cmd_sections(argv, ["options", "meta"])
+    sections = parse_cmd_sections(argv, set(["options", "meta"]))
     main_args = prepare_options(sections["main"])
     ensure_only_allowed_options(main_args, ["id", "description", "path"])
 
@@ -110,7 +104,7 @@ def alert_update(lib, argv, modifiers):
 
     alert_id = argv[0]
 
-    sections = parse_cmd_sections(argv[1:], ["options", "meta"])
+    sections = parse_cmd_sections(argv[1:], set(["options", "meta"]))
     main_args = prepare_options(sections["main"])
     ensure_only_allowed_options(main_args, ["description", "path"])
 
@@ -135,46 +129,47 @@ def recipient_add(lib, argv, modifiers):
         raise CmdLineInputError()
 
     alert_id = argv[0]
-    recipient_value = argv[1]
 
-    sections = parse_cmd_sections(argv[2:], ["options", "meta"])
+    sections = parse_cmd_sections(argv[1:], set(["options", "meta"]))
     main_args = prepare_options(sections["main"])
-    ensure_only_allowed_options(main_args, ["description"])
+    ensure_only_allowed_options(main_args, ["description", "id", "value"])
 
     lib.alert.add_recipient(
         alert_id,
-        recipient_value,
+        main_args.get("value", None),
         prepare_options(sections["options"]),
         prepare_options(sections["meta"]),
-        main_args.get("description", None)
+        recipient_id=main_args.get("id", None),
+        description=main_args.get("description", None),
+        allow_same_value=modifiers["force"]
     )
 
 
 def recipient_update(lib, argv, modifiers):
-    if len(argv) < 2:
+    if len(argv) < 1:
         raise CmdLineInputError()
 
-    alert_id = argv[0]
-    recipient_value = argv[1]
+    recipient_id = argv[0]
 
-    sections = parse_cmd_sections(argv[2:], ["options", "meta"])
+    sections = parse_cmd_sections(argv[1:], set(["options", "meta"]))
     main_args = prepare_options(sections["main"])
-    ensure_only_allowed_options(main_args, ["description"])
+    ensure_only_allowed_options(main_args, ["description", "value"])
 
     lib.alert.update_recipient(
-        alert_id,
-        recipient_value,
+        recipient_id,
         prepare_options(sections["options"]),
         prepare_options(sections["meta"]),
-        main_args.get("description", None)
+        recipient_value=main_args.get("value", None),
+        description=main_args.get("description", None),
+        allow_same_value=modifiers["force"]
     )
 
 
 def recipient_remove(lib, argv, modifiers):
-    if len(argv) != 2:
+    if len(argv) != 1:
         raise CmdLineInputError()
 
-    lib.alert.remove_recipient(argv[0], argv[1])
+    lib.alert.remove_recipient(argv[0])
 
 
 def _nvset_to_str(nvset_obj):
@@ -219,9 +214,9 @@ def _alert_to_str(alert):
 
 
 def _recipient_to_str(recipient):
-    return ["Recipient: {value}".format(value=recipient["value"])] + indent(
-        __description_attributes_to_str(recipient), 1
-    )
+    return ["Recipient: {id} (value={value})".format(
+        value=recipient["value"], id=recipient["id"]
+    )] + indent(__description_attributes_to_str(recipient), 1)
 
 
 def print_alert_config(lib, argv, modifiers):
diff --git a/pcs/app.py b/pcs/app.py
index 3758ee4..ab9e970 100644
--- a/pcs/app.py
+++ b/pcs/app.py
@@ -13,6 +13,7 @@ logging.basicConfig()
 
 from pcs import (
     acl,
+    booth,
     cluster,
     config,
     constraint,
@@ -97,6 +98,7 @@ def main(argv=None):
             "token=", "token_coefficient=", "consensus=", "join=",
             "miss_count_const=", "fail_recv_const=",
             "corosync_conf=", "cluster_conf=",
+            "booth-conf=", "booth-key=",
             "remote", "watchdog=",
             #in pcs status - do not display resorce status on inactive node
             "hide-inactive",
@@ -199,6 +201,11 @@ def main(argv=None):
             args,
             utils.get_modificators()
         ),
+        "booth": lambda argv: booth.booth_cmd(
+            utils.get_library_wrapper(),
+            argv,
+            utils.get_modificators()
+        ),
     }
     if command not in cmd_map:
         usage.main()
diff --git a/pcs/booth.py b/pcs/booth.py
new file mode 100644
index 0000000..5ec41bf
--- /dev/null
+++ b/pcs/booth.py
@@ -0,0 +1,78 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import sys
+
+from pcs import usage
+from pcs import utils
+from pcs.cli.booth import command
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.lib.errors import LibraryError
+from pcs.resource import resource_create, resource_remove, resource_restart
+
+
+def booth_cmd(lib, argv, modifiers):
+    """
+    routes booth command
+    """
+    if len(argv) < 1:
+        usage.booth()
+        sys.exit(1)
+
+    sub_cmd, argv_next = argv[0], argv[1:]
+    try:
+        if sub_cmd == "help":
+            usage.booth(argv)
+        elif sub_cmd == "config":
+            command.config_show(lib, argv_next, modifiers)
+        elif sub_cmd == "setup":
+            command.config_setup(lib, argv_next, modifiers)
+        elif sub_cmd == "destroy":
+            command.config_destroy(lib, argv_next, modifiers)
+        elif sub_cmd == "ticket":
+            if len(argv_next) < 1:
+                raise CmdLineInputError()
+            if argv_next[0] == "add":
+                command.config_ticket_add(lib, argv_next[1:], modifiers)
+            elif argv_next[0] == "remove":
+                command.config_ticket_remove(lib, argv_next[1:], modifiers)
+            elif argv_next[0] == "grant":
+                command.ticket_grant(lib, argv_next[1:], modifiers)
+            elif argv_next[0] == "revoke":
+                command.ticket_revoke(lib, argv_next[1:], modifiers)
+            else:
+                raise CmdLineInputError()
+        elif sub_cmd == "create":
+            command.get_create_in_cluster(resource_create, resource_remove)(
+                lib, argv_next, modifiers
+            )
+        elif sub_cmd == "remove":
+            command.get_remove_from_cluster(resource_remove)(
+                lib, argv_next, modifiers
+            )
+        elif sub_cmd == "restart":
+            command.get_restart(resource_restart)(lib, argv_next, modifiers)
+        elif sub_cmd == "sync":
+            command.sync(lib, argv_next, modifiers)
+        elif sub_cmd == "pull":
+            command.pull(lib, argv_next, modifiers)
+        elif sub_cmd == "enable":
+            command.enable(lib, argv_next, modifiers)
+        elif sub_cmd == "disable":
+            command.disable(lib, argv_next, modifiers)
+        elif sub_cmd == "start":
+            command.start(lib, argv_next, modifiers)
+        elif sub_cmd == "stop":
+            command.stop(lib, argv_next, modifiers)
+        elif sub_cmd == "status":
+            command.status(lib, argv_next, modifiers)
+        else:
+            raise CmdLineInputError()
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(e, "booth", sub_cmd)
diff --git a/pcs/cli/booth/__init__.py b/pcs/cli/booth/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/booth/command.py b/pcs/cli/booth/command.py
new file mode 100644
index 0000000..72b2c73
--- /dev/null
+++ b/pcs/cli/booth/command.py
@@ -0,0 +1,195 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.cli.common.parse_args import group_by_keywords, prepare_options
+
+
+DEFAULT_BOOTH_NAME = "booth"
+
+def __get_name(modifiers):
+    return  modifiers["name"] if modifiers["name"] else DEFAULT_BOOTH_NAME
+
+def config_setup(lib, arg_list, modifiers):
+    """
+    create booth config
+    """
+    peers = group_by_keywords(
+        arg_list,
+        set(["sites", "arbitrators"]),
+        keyword_repeat_allowed=False
+    )
+    if "sites" not in peers or not peers["sites"]:
+        raise CmdLineInputError()
+
+    booth_config = []
+    for site in peers["sites"]:
+        booth_config.append({"key": "site", "value": site, "details": []})
+    for arbitrator in peers["arbitrators"]:
+        booth_config.append({
+            "key": "arbitrator",
+            "value": arbitrator,
+            "details": [],
+        })
+
+    lib.booth.config_setup(booth_config, modifiers["force"])
+
+def config_destroy(lib, arg_list, modifiers):
+    """
+    destroy booth config
+    """
+    if arg_list:
+        raise CmdLineInputError()
+    lib.booth.config_destroy(ignore_config_load_problems=modifiers["force"])
+
+
+def config_show(lib, arg_list, modifiers):
+    """
+    print booth config
+    """
+    if len(arg_list) > 1:
+        raise CmdLineInputError()
+    node = None if not arg_list else arg_list[0]
+
+    print(lib.booth.config_text(DEFAULT_BOOTH_NAME, node), end="")
+
+
+def config_ticket_add(lib, arg_list, modifiers):
+    """
+    add ticket to current configuration
+    """
+    if not arg_list:
+        raise CmdLineInputError
+    lib.booth.config_ticket_add(
+        arg_list[0],
+        prepare_options(arg_list[1:]),
+        allow_unknown_options=modifiers["force"]
+    )
+
+def config_ticket_remove(lib, arg_list, modifiers):
+    """
+    add ticket to current configuration
+    """
+    if len(arg_list) != 1:
+        raise CmdLineInputError
+    lib.booth.config_ticket_remove(arg_list[0])
+
+def ticket_operation(lib_call, arg_list, modifiers):
+    site_ip = None
+    if len(arg_list) == 2:
+        site_ip = arg_list[1]
+    elif len(arg_list) != 1:
+        raise CmdLineInputError()
+
+    ticket = arg_list[0]
+    lib_call(__get_name(modifiers), ticket, site_ip)
+
+def ticket_revoke(lib, arg_list, modifiers):
+    ticket_operation(lib.booth.ticket_revoke, arg_list, modifiers)
+
+def ticket_grant(lib, arg_list, modifiers):
+    ticket_operation(lib.booth.ticket_grant, arg_list, modifiers)
+
+def get_create_in_cluster(resource_create, resource_remove):
+    #TODO resource_remove is provisional hack until resources are not moved to
+    #lib
+    def create_in_cluster(lib, arg_list, modifiers):
+        if len(arg_list) != 2 or arg_list[0] != "ip":
+            raise CmdLineInputError()
+        ip = arg_list[1]
+
+        lib.booth.create_in_cluster(
+            __get_name(modifiers),
+            ip,
+            resource_create,
+            resource_remove,
+        )
+    return create_in_cluster
+
+def get_remove_from_cluster(resource_remove):
+    #TODO resource_remove is provisional hack until resources are not moved to
+    #lib
+    def remove_from_cluster(lib, arg_list, modifiers):
+        if arg_list:
+            raise CmdLineInputError()
+
+        lib.booth.remove_from_cluster(
+            __get_name(modifiers),
+            resource_remove,
+            modifiers["force"],
+        )
+
+    return remove_from_cluster
+
+def get_restart(resource_restart):
+    #TODO resource_restart is provisional hack until resources are not moved to
+    #lib
+    def restart(lib, arg_list, modifiers):
+        if arg_list:
+            raise CmdLineInputError()
+
+        lib.booth.restart(
+            __get_name(modifiers),
+            resource_restart,
+            modifiers["force"],
+        )
+
+    return restart
+
+def sync(lib, arg_list, modifiers):
+    if arg_list:
+        raise CmdLineInputError()
+    lib.booth.config_sync(
+        DEFAULT_BOOTH_NAME,
+        skip_offline_nodes=modifiers["skip_offline_nodes"]
+    )
+
+
+def enable(lib, arg_list, modifiers):
+    if arg_list:
+        raise CmdLineInputError()
+    lib.booth.enable(DEFAULT_BOOTH_NAME)
+
+
+def disable(lib, arg_list, modifiers):
+    if arg_list:
+        raise CmdLineInputError()
+    lib.booth.disable(DEFAULT_BOOTH_NAME)
+
+
+def start(lib, arg_list, modifiers):
+    if arg_list:
+        raise CmdLineInputError()
+    lib.booth.start(DEFAULT_BOOTH_NAME)
+
+
+def stop(lib, arg_list, modifiers):
+    if arg_list:
+        raise CmdLineInputError()
+    lib.booth.stop(DEFAULT_BOOTH_NAME)
+
+
+def pull(lib, arg_list, modifiers):
+    if len(arg_list) != 1:
+        raise CmdLineInputError()
+    lib.booth.pull(arg_list[0], DEFAULT_BOOTH_NAME)
+
+
+def status(lib, arg_list, modifiers):
+    if arg_list:
+        raise CmdLineInputError()
+    booth_status = lib.booth.status(DEFAULT_BOOTH_NAME)
+    if booth_status.get("ticket"):
+        print("TICKETS:")
+        print(booth_status["ticket"])
+    if booth_status.get("peers"):
+        print("PEERS:")
+        print(booth_status["peers"])
+    if booth_status.get("status"):
+        print("DAEMON STATUS:")
+        print(booth_status["status"])
+
diff --git a/pcs/cli/booth/env.py b/pcs/cli/booth/env.py
new file mode 100644
index 0000000..918e487
--- /dev/null
+++ b/pcs/cli/booth/env.py
@@ -0,0 +1,121 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os.path
+
+from pcs.cli.common import console_report
+from pcs.common import report_codes, env_file_role_codes as file_role_codes
+from pcs.lib.errors import LibraryEnvError
+
+
+def read_env_file(path):
+    try:
+        return {
+            "content": open(path).read() if os.path.isfile(path) else None
+        }
+    except EnvironmentError as e:
+        raise console_report.error(
+            "Unable to read {0}: {1}".format(path, e.strerror)
+        )
+
+def write_env_file(env_file, file_path):
+    try:
+        f = open(file_path, "wb" if env_file.get("is_binary", False) else "w")
+        f.write(env_file["content"])
+        f.close()
+    except EnvironmentError as e:
+        raise console_report.error(
+            "Unable to write {0}: {1}".format(file_path, e.strerror)
+        )
+
+def process_no_existing_file_expectation(file_role, env_file, file_path):
+    if(
+        env_file["no_existing_file_expected"]
+        and
+        os.path.exists(file_path)
+    ):
+        msg = "{0} {1} already exists".format(file_role, file_path)
+        if not env_file["can_overwrite_existing_file"]:
+            raise console_report.error(
+                "{0}, use --force to override".format(msg)
+            )
+        console_report.warn(msg)
+
+def is_missing_file_report(report, file_role_code):
+    return (
+        report.code == report_codes.FILE_DOES_NOT_EXIST
+        and
+        report.info["file_role"] == file_role_code
+    )
+
+def report_missing_file(file_role, file_path):
+    console_report.error(
+        "{0} '{1}' does not exist".format(file_role, file_path)
+    )
+
+def middleware_config(name, config_path, key_path):
+    if config_path and not key_path:
+        raise console_report.error(
+            "With --booth-conf must be specified --booth-key as well"
+        )
+
+    if key_path and not config_path:
+        raise console_report.error(
+            "With --booth-key must be specified --booth-conf as well"
+        )
+
+    is_mocked_environment = config_path and key_path
+
+    def create_booth_env():
+        if not is_mocked_environment:
+            return {"name": name}
+        return {
+            "name": name,
+            "config_file": read_env_file(config_path),
+            "key_file": read_env_file(key_path),
+            "key_path": key_path,
+        }
+
+    def flush(modified_env):
+        if not is_mocked_environment:
+            return
+        if not modified_env:
+            #TODO now this would not happen
+            #for more information see comment in
+            #pcs.cli.common.lib_wrapper.lib_env_to_cli_env
+            raise console_report.error("Error during library communication")
+
+        process_no_existing_file_expectation(
+            "booth config file",
+            modified_env["config_file"],
+            config_path
+        )
+        process_no_existing_file_expectation(
+            "booth key file",
+            modified_env["key_file"],
+            key_path
+        )
+        write_env_file(modified_env["key_file"], key_path)
+        write_env_file(modified_env["config_file"], config_path)
+
+    def apply(next_in_line, env, *args, **kwargs):
+        env.booth = create_booth_env()
+        try:
+            result_of_next = next_in_line(env, *args, **kwargs)
+        except LibraryEnvError as e:
+            for report in e.args:
+                if is_missing_file_report(report, file_role_codes.BOOTH_CONFIG):
+                    report_missing_file("Booth config file", config_path)
+                    e.sign_processed(report)
+                if is_missing_file_report(report, file_role_codes.BOOTH_KEY):
+                    report_missing_file("Booth key file", key_path)
+                    e.sign_processed(report)
+            raise e
+        flush(env.booth["modified_env"])
+        return result_of_next
+
+    return apply
diff --git a/pcs/cli/booth/test/__init__.py b/pcs/cli/booth/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/booth/test/test_command.py b/pcs/cli/booth/test/test_command.py
new file mode 100644
index 0000000..8ba2c0e
--- /dev/null
+++ b/pcs/cli/booth/test/test_command.py
@@ -0,0 +1,54 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.cli.booth import command
+from pcs.test.tools.pcs_unittest import mock
+
+
+class ConfigSetupTest(TestCase):
+    def test_call_lib_with_correct_args(self):
+        lib = mock.MagicMock()
+        lib.booth = mock.MagicMock()
+        lib.booth.config_setup = mock.MagicMock()
+
+        command.config_setup(
+            lib,
+            arg_list=[
+                "sites", "1.1.1.1", "2.2.2.2", "4.4.4.4",
+                "arbitrators", "3.3.3.3"
+            ],
+            modifiers={
+                "force": False,
+            }
+        )
+        lib.booth.config_setup.assert_called_once_with(
+            [
+                {"key": "site", "value": "1.1.1.1", "details": []},
+                {"key": "site", "value": "2.2.2.2", "details": []},
+                {"key": "site", "value": "4.4.4.4", "details": []},
+                {"key": "arbitrator", "value": "3.3.3.3", "details": []},
+            ],
+            False
+        )
+
+class ConfigTicketAddTest(TestCase):
+    def test_call_lib_with_ticket_name(self):
+        lib = mock.MagicMock()
+        lib.booth = mock.MagicMock()
+        lib.booth.config_ticket_add = mock.MagicMock()
+        command.config_ticket_add(
+            lib,
+            arg_list=["TICKET_A", "timeout=10"],
+            modifiers={"force": True}
+        )
+        lib.booth.config_ticket_add.assert_called_once_with(
+            "TICKET_A",
+            {"timeout": "10"},
+            allow_unknown_options=True
+        )
diff --git a/pcs/cli/booth/test/test_env.py b/pcs/cli/booth/test/test_env.py
new file mode 100644
index 0000000..b1d80aa
--- /dev/null
+++ b/pcs/cli/booth/test/test_env.py
@@ -0,0 +1,118 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.cli.booth.env import middleware_config
+from pcs.common import report_codes, env_file_role_codes
+from pcs.lib.errors import LibraryEnvError, ReportItem
+from pcs.test.tools.pcs_unittest import mock
+
+
+class BoothConfTest(TestCase):
+    @mock.patch("pcs.cli.booth.env.os.path.isfile")
+    def test_sucessfully_care_about_local_file(self, mock_is_file):
+        #setup, fixtures
+        def next_in_line(env):
+            env.booth["modified_env"] = {
+                "config_file": {
+                    "content": "file content",
+                    "no_existing_file_expected": False,
+                },
+                "key_file": {
+                    "content": "key file content",
+                    "no_existing_file_expected": False,
+                }
+            }
+            return "call result"
+        mock_is_file.return_value = True
+        mock_env = mock.MagicMock()
+
+        mock_open = mock.mock_open()
+        with mock.patch(
+            "pcs.cli.booth.env.open",
+            mock_open,
+            create=True
+        ):
+            #run tested code
+            booth_conf_middleware = middleware_config(
+                "booth-name",
+                "/local/file/path.conf",
+                "/local/file/path.key",
+            )
+
+            self.assertEqual(
+                "call result",
+                booth_conf_middleware(next_in_line, mock_env)
+            )
+
+        #assertions
+        self.assertEqual(mock_is_file.mock_calls,[
+            mock.call("/local/file/path.conf"),
+            mock.call("/local/file/path.key"),
+        ])
+
+        self.assertEqual(mock_env.booth["name"], "booth-name")
+        self.assertEqual(mock_env.booth["config_file"], {"content": ""})
+        self.assertEqual(mock_env.booth["key_file"], {"content": ""})
+
+        self.assertEqual(mock_open.mock_calls, [
+            mock.call(u'/local/file/path.conf'),
+            mock.call().read(),
+            mock.call(u'/local/file/path.key'),
+            mock.call().read(),
+            mock.call(u'/local/file/path.key', u'w'),
+            mock.call().write(u'key file content'),
+            mock.call().close(),
+            mock.call(u'/local/file/path.conf', u'w'),
+            mock.call().write(u'file content'),
+            mock.call().close(),
+        ])
+
+    @mock.patch("pcs.cli.booth.env.console_report")
+    @mock.patch("pcs.cli.booth.env.os.path.isfile")
+    def test_catch_exactly_his_exception(
+        self, mock_is_file, mock_console_report
+    ):
+        next_in_line = mock.Mock(side_effect=LibraryEnvError(
+            ReportItem.error(report_codes.FILE_DOES_NOT_EXIST, "", info={
+                "file_role": env_file_role_codes.BOOTH_CONFIG,
+            }),
+            ReportItem.error(report_codes.FILE_DOES_NOT_EXIST, "", info={
+                "file_role": env_file_role_codes.BOOTH_KEY,
+            }),
+            ReportItem.error("OTHER ERROR", "", info={}),
+        ))
+        mock_is_file.return_value = False
+        mock_env = mock.MagicMock()
+
+        #run tested code
+        booth_conf_middleware = middleware_config(
+            "booth-name",
+            "/local/file/path.conf",
+            "/local/file/path.key",
+        )
+        raised_exception = []
+        def run_middleware():
+            try:
+                booth_conf_middleware(next_in_line, mock_env)
+            except Exception as e:
+                raised_exception.append(e)
+                raise e
+
+        self.assertRaises(LibraryEnvError, run_middleware)
+        self.assertEqual(1, len(raised_exception[0].unprocessed))
+        self.assertEqual("OTHER ERROR", raised_exception[0].unprocessed[0].code)
+
+        self.assertEqual(mock_console_report.error.mock_calls, [
+            mock.call(
+                "Booth config file '/local/file/path.conf' does not exist"
+            ),
+            mock.call(
+                "Booth key file '/local/file/path.key' does not exist"
+            ),
+        ])
diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
index 3d42798..e600168 100644
--- a/pcs/cli/common/console_report.py
+++ b/pcs/cli/common/console_report.py
@@ -8,10 +8,15 @@ from __future__ import (
 import sys
 
 
-def error(message, exit=True):
-    sys.stderr.write("Error: {0}\n".format(message))
-    if exit:
-        sys.exit(1)
+def warn(message):
+    sys.stdout.write(format_message(message, "Warning: "))
+
+def format_message(message, prefix):
+    return "{0}{1}\n".format(prefix, message)
+
+def error(message):
+    sys.stderr.write(format_message(message, "Error: "))
+    return SystemExit(1)
 
 def indent(line_list, indent_step=2):
     """
diff --git a/pcs/cli/common/env.py b/pcs/cli/common/env.py
index 2ba4f70..b1d951d 100644
--- a/pcs/cli/common/env.py
+++ b/pcs/cli/common/env.py
@@ -6,11 +6,13 @@ from __future__ import (
 )
 
 class Env(object):
+    #pylint: disable=too-many-instance-attributes
     def __init__(self):
         self.cib_data = None
         self.cib_upgraded = False
         self.user = None
         self.groups = None
         self.corosync_conf_data = None
+        self.booth = None
         self.auth_tokens_getter = None
         self.debug = False
diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
index c4b8342..99bfe35 100644
--- a/pcs/cli/common/lib_wrapper.py
+++ b/pcs/cli/common/lib_wrapper.py
@@ -5,27 +5,30 @@ from __future__ import (
     unicode_literals,
 )
 
-from collections import namedtuple
-from functools import partial
 import logging
+import sys
+from collections import namedtuple
 
 from pcs.cli.common import middleware
-
-#from pcs.lib import commands does not work: "commands" is package
-from pcs.lib.commands.constraint import colocation as constraint_colocation
-from pcs.lib.commands.constraint import order as constraint_order
-from pcs.lib.commands.constraint import ticket as constraint_ticket
+from pcs.cli.common.reports import (
+    LibraryReportProcessorToConsole,
+    process_library_reports
+)
 from pcs.lib.commands import (
+    booth,
     quorum,
     qdevice,
     sbd,
     alert,
 )
-from pcs.cli.common.reports import (
-    LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
+from pcs.lib.commands.constraint import (
+    colocation as constraint_colocation,
+    order as constraint_order,
+    ticket as constraint_ticket
 )
-
 from pcs.lib.env import LibraryEnvironment
+from pcs.lib.errors import LibraryEnvError
+
 
 _CACHE = {}
 
@@ -40,7 +43,8 @@ def cli_env_to_lib_env(cli_env):
         cli_env.groups,
         cli_env.cib_data,
         cli_env.corosync_conf_data,
-        cli_env.auth_tokens_getter,
+        booth=cli_env.booth,
+        auth_tokens_getter=cli_env.auth_tokens_getter,
     )
 
 def lib_env_to_cli_env(lib_env, cli_env):
@@ -49,6 +53,19 @@ def lib_env_to_cli_env(lib_env, cli_env):
         cli_env.cib_upgraded = lib_env.cib_upgraded
     if not lib_env.is_corosync_conf_live:
         cli_env.corosync_conf_data = lib_env.get_corosync_conf_data()
+
+    #TODO
+    #now we know: if booth is in cli_env it is in lib_env as well
+    #when we communicate with the library over the network we will need extra
+    #sanitization here
+    #this applies generally, not only for booth
+    #corosync_conf and cib suffer from this problem as well but in those cases
+    #it is dangerously hidden: when an inconsistency between the cli and lib
+    #environments occurs, the original content is put to the file (which is
+    #wrong)
+    if cli_env.booth:
+        cli_env.booth["modified_env"] = lib_env.booth.export()
+
     return cli_env
 
 def bind(cli_env, run_with_middleware, run_library_command):
@@ -62,7 +79,17 @@ def bind(cli_env, run_with_middleware, run_library_command):
         lib_env_to_cli_env(lib_env, cli_env)
 
         return lib_call_result
-    return partial(run_with_middleware, run, cli_env)
+
+    def decorated_run(*args, **kwargs):
+        try:
+            return run_with_middleware(run, cli_env, *args, **kwargs)
+        except LibraryEnvError as e:
+            process_library_reports(e.unprocessed)
+            #TODO we use explicit exit here - process_library_reports still
+            #has the possibility to not exit - it will need deeper rethinking
+            sys.exit(1)
+
+    return decorated_run
 
 def bind_all(env, run_with_middleware, dictionary):
     return wrapper(dict(
@@ -105,6 +132,7 @@ def load_module(env, middleware_factory, name):
                 'set': constraint_ticket.create_with_set,
                 'show': constraint_ticket.show,
                 'add': constraint_ticket.create,
+                'remove': constraint_ticket.remove,
             }
         )
 
@@ -172,6 +200,34 @@ def load_module(env, middleware_factory, name):
             }
         )
 
+    if name == "booth":
+        return bind_all(
+            env,
+            middleware.build(
+                middleware_factory.booth_conf,
+                middleware_factory.cib
+            ),
+            {
+                "config_setup": booth.config_setup,
+                "config_destroy": booth.config_destroy,
+                "config_text": booth.config_text,
+                "config_ticket_add": booth.config_ticket_add,
+                "config_ticket_remove": booth.config_ticket_remove,
+                "create_in_cluster": booth.create_in_cluster,
+                "remove_from_cluster": booth.remove_from_cluster,
+                "restart": booth.restart,
+                "config_sync": booth.config_sync,
+                "enable": booth.enable_booth,
+                "disable": booth.disable_booth,
+                "start": booth.start_booth,
+                "stop": booth.stop_booth,
+                "pull": booth.pull_config,
+                "status": booth.get_status,
+                "ticket_grant": booth.ticket_grant,
+                "ticket_revoke": booth.ticket_revoke,
+            }
+        )
+
     raise Exception("No library part '{0}'".format(name))
 
 class Library(object):
diff --git a/pcs/cli/common/middleware.py b/pcs/cli/common/middleware.py
index e53e138..9254a12 100644
--- a/pcs/cli/common/middleware.py
+++ b/pcs/cli/common/middleware.py
@@ -29,11 +29,12 @@ def cib(use_local_cib, load_cib_content, write_cib):
     """
     def apply(next_in_line, env, *args, **kwargs):
         if use_local_cib:
-            env.cib_data = load_cib_content()
+            original_content = load_cib_content()
+            env.cib_data = original_content
 
         result_of_next = next_in_line(env, *args, **kwargs)
 
-        if use_local_cib:
+        if use_local_cib and env.cib_data != original_content:
             write_cib(env.cib_data, env.cib_upgraded)
 
         return result_of_next
@@ -45,7 +46,7 @@ def corosync_conf_existing(local_file_path):
             try:
                 env.corosync_conf_data = open(local_file_path).read()
             except EnvironmentError as e:
-                console_report.error("Unable to read {0}: {1}".format(
+                raise console_report.error("Unable to read {0}: {1}".format(
                     local_file_path,
                     e.strerror
                 ))
@@ -58,7 +59,7 @@ def corosync_conf_existing(local_file_path):
                 f.write(env.corosync_conf_data)
                 f.close()
             except EnvironmentError as e:
-                console_report.error("Unable to write {0}: {1}".format(
+                raise console_report.error("Unable to write {0}: {1}".format(
                     local_file_path,
                     e.strerror
                 ))
diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
index 3b01775..d17c5da 100644
--- a/pcs/cli/common/parse_args.py
+++ b/pcs/cli/common/parse_args.py
@@ -25,3 +25,30 @@ def prepare_options(cmdline_args):
         name, value = arg.split("=", 1)
         options[name] = value
     return options
+
+def group_by_keywords(
+    arg_list, keyword_set,
+    implicit_first_keyword=None, keyword_repeat_allowed=True,
+):
+    groups = dict([(keyword, []) for keyword in keyword_set])
+    if implicit_first_keyword:
+        groups[implicit_first_keyword] = []
+
+    if not arg_list:
+        return groups
+
+    used_keywords = []
+    if implicit_first_keyword:
+        used_keywords.append(implicit_first_keyword)
+    elif arg_list[0] not in keyword_set:
+        raise CmdLineInputError()
+
+    for arg in arg_list:
+        if arg in list(groups.keys()):
+            if arg in used_keywords and not keyword_repeat_allowed:
+                raise CmdLineInputError()
+            used_keywords.append(arg)
+        else:
+            groups[used_keywords[-1]].append(arg)
+
+    return groups
diff --git a/pcs/cli/common/test/test_completion.py b/pcs/cli/common/test/test_completion.py
index 865da2c..daec1bc 100644
--- a/pcs/cli/common/test/test_completion.py
+++ b/pcs/cli/common/test/test_completion.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from pcs.cli.common.completion import (
     _find_suggestions,
diff --git a/pcs/cli/common/test/test_console_report.py b/pcs/cli/common/test/test_console_report.py
index 23cf8e9..63fe55c 100644
--- a/pcs/cli/common/test/test_console_report.py
+++ b/pcs/cli/common/test/test_console_report.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 from pcs.cli.common.console_report import indent
 
 class IndentTest(TestCase):
diff --git a/pcs/cli/common/test/test_lib_wrapper.py b/pcs/cli/common/test/test_lib_wrapper.py
index f34d2d0..149e612 100644
--- a/pcs/cli/common/test/test_lib_wrapper.py
+++ b/pcs/cli/common/test/test_lib_wrapper.py
@@ -4,10 +4,12 @@ from __future__ import (
     print_function,
     unicode_literals,
 )
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
-from pcs.cli.common.lib_wrapper import Library
-from pcs.test.tools.pcs_mock import mock
+from pcs.cli.common.lib_wrapper import Library, bind
+from pcs.test.tools.pcs_unittest import mock
+from pcs.lib.errors import ReportItem
+from pcs.lib.errors import LibraryEnvError
 
 class LibraryWrapperTest(TestCase):
     def test_raises_for_bad_path(self):
@@ -30,6 +32,28 @@ class LibraryWrapperTest(TestCase):
         mock_middleware_factory = mock.MagicMock()
         mock_middleware_factory.cib = dummy_middleware
         mock_middleware_factory.corosync_conf_existing = dummy_middleware
-        Library('env', mock_middleware_factory).constraint_order.set('first', second="third")
+        mock_env = mock.MagicMock()
+        Library(mock_env, mock_middleware_factory).constraint_order.set(
+            'first', second="third"
+        )
 
         mock_order_set.assert_called_once_with(lib_env, "first", second="third")
+
+class BindTest(TestCase):
+    @mock.patch("pcs.cli.common.lib_wrapper.process_library_reports")
+    def test_report_unprocessed_library_env_errors(self, mock_process_report):
+        report1 = ReportItem.error("OTHER ERROR", "", info={})
+        report2 = ReportItem.error("OTHER ERROR", "", info={})
+        report3 = ReportItem.error("OTHER ERROR", "", info={})
+        e = LibraryEnvError(report1, report2, report3)
+        e.sign_processed(report2)
+        mock_middleware = mock.Mock(side_effect=e)
+
+        binded = bind(
+            cli_env=None,
+            run_with_middleware=mock_middleware,
+            run_library_command=None
+        )
+
+        self.assertRaises(SystemExit, lambda: binded(cli_env=None))
+        mock_process_report.assert_called_once_with([report1, report3])
diff --git a/pcs/cli/common/test/test_middleware.py b/pcs/cli/common/test/test_middleware.py
index 6179882..7eefbca 100644
--- a/pcs/cli/common/test/test_middleware.py
+++ b/pcs/cli/common/test/test_middleware.py
@@ -5,8 +5,9 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
-import pcs.cli.common.middleware
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.cli.common import middleware
 
 
 class MiddlewareBuildTest(TestCase):
@@ -29,7 +30,7 @@ class MiddlewareBuildTest(TestCase):
             next(lib, argv, modificators)
             log.append('m2 done')
 
-        run_with_middleware = pcs.cli.common.middleware.build(m1, m2)
+        run_with_middleware = middleware.build(m1, m2)
         run_with_middleware(command, "1", "2", "3")
         self.assertEqual(log, [
             'm1 start: 1, 2, 3',
@@ -38,3 +39,4 @@ class MiddlewareBuildTest(TestCase):
             'm2 done',
             'm1 done',
         ])
+
diff --git a/pcs/cli/common/test/test_parse_args.py b/pcs/cli/common/test/test_parse_args.py
index 1d6c4b0..23704b9 100644
--- a/pcs/cli/common/test/test_parse_args.py
+++ b/pcs/cli/common/test/test_parse_args.py
@@ -5,8 +5,12 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
-from pcs.cli.common.parse_args import split_list, prepare_options
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.cli.common.parse_args import(
+    split_list,
+    prepare_options,
+    group_by_keywords,
+)
 from pcs.cli.common.errors import CmdLineInputError
 
 
@@ -42,3 +46,81 @@ class SplitListTest(TestCase):
             [[], ['a', 'b'], ['c', 'd'], []],
             split_list(['|','a', 'b', '|', 'c', 'd', "|"], '|')
         )
+
+class SplitByKeywords(TestCase):
+    def test_split_with_implicit_first_keyword(self):
+        self.assertEqual(
+            group_by_keywords(
+                [0, "first", 1, 2, "second", 3],
+                set(["first", "second"]),
+                implicit_first_keyword="zero"
+            ),
+            {
+                "zero": [0],
+                "first": [1, 2],
+                "second": [3],
+            }
+        )
+
+    def test_splict_without_implict_keyword(self):
+        self.assertEqual(
+            group_by_keywords(
+                ["first", 1, 2, "second", 3],
+                set(["first", "second"]),
+            ),
+            {
+                "first": [1, 2],
+                "second": [3],
+            }
+        )
+
+    def test_raises_when_args_do_not_start_with_keyword_nor_implicit(self):
+        self.assertRaises(CmdLineInputError, lambda: group_by_keywords(
+            [0, "first", 1, 2, "second", 3],
+            set(["first", "second"]),
+        ))
+
+    def test_returns_dict_with_empty_lists_for_no_args(self):
+        self.assertEqual(
+            group_by_keywords(
+                [],
+                set(["first", "second"])
+            ),
+            {
+                "first": [],
+                "second": [],
+            }
+        )
+
+    def test_returns_dict_with_empty_lists_for_no_args_implicit_case(self):
+        self.assertEqual(
+            group_by_keywords(
+                [],
+                set(["first", "second"]),
+                implicit_first_keyword="zero",
+            ),
+            {
+                "zero": [],
+                "first": [],
+                "second": [],
+            }
+        )
+
+    def test_allow_keywords_repeating(self):
+        self.assertEqual(
+            group_by_keywords(
+                ["first", 1, 2, "second", 3, "first", 4],
+                set(["first", "second"]),
+            ),
+            {
+                "first": [1, 2, 4],
+                "second": [3],
+            }
+        )
+
+    def test_can_disallow_keywords_repeating(self):
+        self.assertRaises(CmdLineInputError, lambda: group_by_keywords(
+            ["first", 1, 2, "second", 3, "first"],
+            set(["first", "second"]),
+            keyword_repeat_allowed=False,
+        ))
diff --git a/pcs/cli/constraint/test/test_command.py b/pcs/cli/constraint/test/test_command.py
index 5b493cd..6a79e00 100644
--- a/pcs/cli/constraint/test/test_command.py
+++ b/pcs/cli/constraint/test/test_command.py
@@ -5,10 +5,10 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 from pcs.cli.constraint import command
 
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 
 def fixture_constraint():
     return {
diff --git a/pcs/cli/constraint/test/test_console_report.py b/pcs/cli/constraint/test/test_console_report.py
index b20bc80..084124c 100644
--- a/pcs/cli/constraint/test/test_console_report.py
+++ b/pcs/cli/constraint/test/test_console_report.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 from pcs.cli.constraint import console_report
 
 class OptionsTest(TestCase):
diff --git a/pcs/cli/constraint/test/test_parse_args.py b/pcs/cli/constraint/test/test_parse_args.py
index 7673023..484cb8d 100644
--- a/pcs/cli/constraint/test/test_parse_args.py
+++ b/pcs/cli/constraint/test/test_parse_args.py
@@ -5,16 +5,11 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from pcs.cli.common.errors import CmdLineInputError
 from pcs.cli.constraint.parse_args import prepare_set_args, prepare_resource_sets
-
-
-try:
-    import unittest.mock as mock
-except ImportError:
-    import mock
+from pcs.test.tools.pcs_unittest import mock
 
 
 @mock.patch("pcs.cli.common.parse_args.prepare_options")
diff --git a/pcs/cli/constraint_all/test/test_console_report.py b/pcs/cli/constraint_all/test/test_console_report.py
index 1cf5721..61be2cc 100644
--- a/pcs/cli/constraint_all/test/test_console_report.py
+++ b/pcs/cli/constraint_all/test/test_console_report.py
@@ -5,8 +5,8 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.pcs_unittest import mock
 from pcs.cli.constraint_all import console_report
 
 class ConstraintTest(TestCase):
diff --git a/pcs/cli/constraint_ticket/command.py b/pcs/cli/constraint_ticket/command.py
index ab70434..583ba9e 100644
--- a/pcs/cli/constraint_ticket/command.py
+++ b/pcs/cli/constraint_ticket/command.py
@@ -8,6 +8,7 @@ from __future__ import (
 from pcs.cli.common.errors import CmdLineInputError
 from pcs.cli.constraint import command
 from pcs.cli.constraint_ticket import parse_args, console_report
+from pcs.cli.common.console_report import error
 
 def create_with_set(lib, argv, modificators):
     """
@@ -52,6 +53,13 @@ def add(lib, argv, modificators):
         duplication_alowed=modificators["force"],
     )
 
+def remove(lib, argv, modificators):
+    if len(argv) != 2:
+        raise CmdLineInputError()
+    ticket, resource_id = argv
+    if not lib.constraint_ticket.remove(ticket, resource_id):
+        raise error("no matching ticket constraint found")
+
 def show(lib, argv, modificators):
     """
     show all ticket constraints
diff --git a/pcs/cli/constraint_ticket/test/test_command.py b/pcs/cli/constraint_ticket/test/test_command.py
index 045d336..9ca7817 100644
--- a/pcs/cli/constraint_ticket/test/test_command.py
+++ b/pcs/cli/constraint_ticket/test/test_command.py
@@ -5,9 +5,9 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 from pcs.cli.common.errors import CmdLineInputError
 from pcs.cli.constraint_ticket import command
 
@@ -65,3 +65,25 @@ class AddTest(TestCase):
             resource_in_clone_alowed=True,
             duplication_alowed=True,
         )
+
+class RemoveTest(TestCase):
+    def test_refuse_args_count(self):
+        self.assertRaises(CmdLineInputError, lambda: command.remove(
+            mock.MagicMock(),
+            ["TICKET"],
+            {},
+        ))
+        self.assertRaises(CmdLineInputError, lambda: command.remove(
+            mock.MagicMock(),
+            ["TICKET", "RESOURCE", "SOMETHING_ELSE"],
+            {},
+        ))
+
+    def test_call_library_remove_with_correct_attrs(self):
+        lib = mock.MagicMock(
+            constraint_ticket=mock.MagicMock(remove=mock.Mock())
+        )
+        command.remove(lib, ["TICKET", "RESOURCE"], {})
+        lib.constraint_ticket.remove.assert_called_once_with(
+            "TICKET", "RESOURCE",
+        )
diff --git a/pcs/cli/constraint_ticket/test/test_console_report.py b/pcs/cli/constraint_ticket/test/test_console_report.py
index b352287..11af2e2 100644
--- a/pcs/cli/constraint_ticket/test/test_console_report.py
+++ b/pcs/cli/constraint_ticket/test/test_console_report.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 from pcs.cli.constraint_ticket import console_report
 
 class ConstraintPlainTest(TestCase):
diff --git a/pcs/cli/constraint_ticket/test/test_parse_args.py b/pcs/cli/constraint_ticket/test/test_parse_args.py
index 9d23167..4a592c2 100644
--- a/pcs/cli/constraint_ticket/test/test_parse_args.py
+++ b/pcs/cli/constraint_ticket/test/test_parse_args.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 from pcs.cli.constraint_ticket import parse_args
 from pcs.cli.common.errors import CmdLineInputError
 
diff --git a/pcs/cluster.py b/pcs/cluster.py
index 9d4798c..68c20f4 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -26,7 +26,7 @@ from pcs import (
     constraint,
     node,
     pcsd,
-    prop,
+    quorum,
     resource,
     settings,
     status,
@@ -42,6 +42,8 @@ from pcs.lib import (
     sbd as lib_sbd,
     reports as lib_reports,
 )
+from pcs.lib.booth import sync as booth_sync
+from pcs.lib.nodes_task import check_corosync_offline_on_nodes
 from pcs.lib.commands.quorum import _add_device_model_net
 from pcs.lib.corosync import (
     config_parser as corosync_conf_utils,
@@ -54,6 +56,8 @@ from pcs.lib.errors import (
 )
 from pcs.lib.external import (
     disable_service,
+    is_cman_cluster,
+    is_systemctl,
     NodeCommunicationException,
     node_communicator_exception_to_report_item,
 )
@@ -143,9 +147,9 @@ def cluster_cmd(argv):
         cluster_report(argv)
     elif (sub_cmd == "quorum"):
         if argv and argv[0] == "unblock":
-            cluster_quorum_unblock(argv[1:])
+            quorum.quorum_unblock_cmd(argv[1:])
         else:
-            usage.cluster(["quorum"])
+            usage.cluster()
             sys.exit(1)
     else:
         usage.cluster()
@@ -1074,7 +1078,7 @@ def disable_cluster_nodes(nodes):
     if len(error_list) > 0:
         utils.err("unable to disable all nodes\n" + "\n".join(error_list))
 
-def destroy_cluster(argv):
+def destroy_cluster(argv, keep_going=False):
     if len(argv) > 0:
         # stop pacemaker and resources while cluster is still quorate
         nodes = argv
@@ -1083,7 +1087,14 @@ def destroy_cluster(argv):
         # destroy will stop any remaining cluster daemons
         error_list = parallel_for_nodes(utils.destroyCluster, nodes, quiet=True)
         if error_list:
-            utils.err("unable to destroy cluster\n" + "\n".join(error_list))
+            if keep_going:
+                print(
+                    "Warning: unable to destroy cluster\n"
+                    +
+                    "\n".join(error_list)
+                )
+            else:
+                utils.err("unable to destroy cluster\n" + "\n".join(error_list))
 
 def stop_cluster(argv):
     if len(argv) > 0:
@@ -1132,7 +1143,17 @@ def stop_cluster(argv):
 
 def stop_cluster_pacemaker():
     print("Stopping Cluster (pacemaker)...")
-    output, retval = utils.run(["service", "pacemaker","stop"])
+    command = ["service", "pacemaker", "stop"]
+    if not is_systemctl() and is_cman_cluster(utils.cmd_runner()):
+        # If --skip-cman is not specified, pacemaker init script will stop cman
+        # and corosync as well. That way some of the nodes may stop cman before
+        # others stop pacemaker, which leads to quorum loss. We need to keep
+        # quorum until all pacemaker resources are stopped as some of them may
+        # need quorum to be able to stop.
+        # Additional parameters are not supported if "service" command is
+        # redirected to systemd.
+        command.append("--skip-cman")
+    output, retval = utils.run(command)
     if retval != 0:
         print(output)
         utils.err("unable to stop pacemaker")
@@ -1157,7 +1178,18 @@ def stop_cluster_corosync():
                 utils.err("unable to stop {0}".format(service))
 
 def kill_cluster(argv):
-    daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync-qdevice", "corosync"]
+    daemons = [
+        "crmd",
+        "pengine",
+        "attrd",
+        "lrmd",
+        "stonithd",
+        "cib",
+        "pacemakerd",
+        "pacemaker_remoted",
+        "corosync-qdevice",
+        "corosync",
+    ]
     dummy_output, dummy_retval = utils.run(["killall", "-9"] + daemons)
 #    if dummy_retval != 0:
 #        print "Error: unable to execute killall -9"
@@ -1309,6 +1341,36 @@ def get_cib(argv):
         except IOError as e:
             utils.err("Unable to write to file '%s', %s" % (filename, e.strerror))
 
+
+def _ensure_cluster_is_offline_if_atb_should_be_enabled(
+    lib_env, node_num_modifier, skip_offline_nodes=False
+):
+    """
+    Check if cluster is offline if auto tie breaker should be enabled.
+    Raises LibraryError if ATB needs to be enabled but the cluster is not offline.
+
+    lib_env -- LibraryEnvironment
+    node_num_modifier -- number which will be added to the number of nodes in
+        the cluster when determining whether ATB is needed.
+    skip_offline_nodes -- if True offline nodes will be skipped
+    """
+    corosync_conf = lib_env.get_corosync_conf()
+    if lib_sbd.atb_has_to_be_enabled(
+        lib_env.cmd_runner(), corosync_conf, node_num_modifier
+    ):
+        print(
+            "Warning: auto_tie_breaker quorum option will be enabled to make "
+            "SBD fencing effecive after this change. Cluster has to be offline "
+            "to be able to make this change."
+        )
+        check_corosync_offline_on_nodes(
+            lib_env.node_communicator(),
+            lib_env.report_processor,
+            corosync_conf.get_nodes(),
+            skip_offline_nodes
+        )
+
+
 def cluster_node(argv):
     if len(argv) != 2:
         usage.cluster()
@@ -1324,19 +1386,28 @@ def cluster_node(argv):
 
     node = argv[1]
     node0, node1 = utils.parse_multiring_node(node)
-
     if not node0:
         utils.err("missing ring 0 address of the node")
-    status,output = utils.checkAuthorization(node0)
-    if status == 2:
-        utils.err("pcsd is not running on %s" % node0)
-    elif status == 3:
-        utils.err(
-            "%s is not yet authenticated (try pcs cluster auth %s)"
-            % (node0, node0)
-        )
-    elif status != 0:
-        utils.err(output)
+
+    # allow to continue if removing a node with --force
+    if add_node or "--force" not in utils.pcs_options:
+        status, output = utils.checkAuthorization(node0)
+        if status != 0:
+            if status == 2:
+                msg = "pcsd is not running on {0}".format(node0)
+            elif status == 3:
+                msg = (
+                    "{node} is not yet authenticated "
+                    + " (try pcs cluster auth {node})"
+                ).format(node=node0)
+            else:
+                msg = output
+            if not add_node:
+                msg += ", use --force to override"
+            utils.err(msg)
+
+    lib_env = utils.get_lib_env()
+    modifiers = utils.get_modificators()
 
     if add_node == True:
         wait = False
@@ -1355,16 +1426,36 @@ def cluster_node(argv):
                 "cluster is not configured for RRP, "
                 "you must not specify ring 1 address for the node"
             )
-        corosync_conf = None
         (canAdd, error) =  utils.canAddNodeToCluster(node0)
         if not canAdd:
             utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
 
-        lib_env = utils.get_lib_env()
         report_processor = lib_env.report_processor
         node_communicator = lib_env.node_communicator()
         node_addr = NodeAddresses(node0, node1)
+
+        # First set up everything else than corosync. Once the new node is
+        # present in corosync.conf / cluster.conf, it's considered part of a
+        # cluster and the node add command cannot be run again. So we need to
+        # minimize the amount of actions (and therefore possible failures) after
+        # adding the node to corosync.
         try:
+            # qdevice setup
+            if not utils.is_rhel6():
+                conf_facade = corosync_conf_facade.from_string(
+                    utils.getCorosyncConf()
+                )
+                qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings()
+                if qdevice_model == "net":
+                    _add_device_model_net(
+                        lib_env,
+                        qdevice_model_options["host"],
+                        conf_facade.get_cluster_name(),
+                        [node_addr],
+                        skip_offline_nodes=False
+                    )
+
+            # sbd setup
             if lib_sbd.is_sbd_enabled(utils.cmd_runner()):
                 if "--watchdog" not in utils.pcs_options:
                     watchdog = settings.sbd_watchdog_default
@@ -1374,6 +1465,10 @@ def cluster_node(argv):
                 else:
                     watchdog = utils.pcs_options["--watchdog"][0]
 
+                _ensure_cluster_is_offline_if_atb_should_be_enabled(
+                    lib_env, 1, modifiers["skip_offline_nodes"]
+                )
+
                 report_processor.process(lib_reports.sbd_check_started())
                 lib_sbd.check_sbd_on_node(
                     report_processor, node_communicator, node_addr, watchdog
@@ -1381,12 +1476,15 @@ def cluster_node(argv):
                 sbd_cfg = environment_file_to_dict(
                     lib_sbd.get_local_sbd_config()
                 )
-                sbd_cfg["SBD_WATCHDOG_DEV"] = watchdog
                 report_processor.process(
                     lib_reports.sbd_config_distribution_started()
                 )
                 lib_sbd.set_sbd_config_on_node(
-                    report_processor, node_communicator, node_addr, sbd_cfg
+                    report_processor,
+                    node_communicator,
+                    node_addr,
+                    sbd_cfg,
+                    watchdog
                 )
                 report_processor.process(lib_reports.sbd_enabling_started())
                 lib_sbd.enable_sbd_service_on_node(
@@ -1397,6 +1495,15 @@ def cluster_node(argv):
                 lib_sbd.disable_sbd_service_on_node(
                     report_processor, node_communicator, node_addr
                 )
+
+            # booth setup
+            booth_sync.send_all_config_to_node(
+                node_communicator,
+                report_processor,
+                node_addr,
+                rewrite_existing=modifiers["force"],
+                skip_wrong_config=modifiers["force"]
+            )
         except LibraryError as e:
             process_library_reports(e.args)
         except NodeCommunicationException as e:
@@ -1404,6 +1511,8 @@ def cluster_node(argv):
                 [node_communicator_exception_to_report_item(e)]
             )
 
+        # Now add the new node to corosync.conf / cluster.conf
+        corosync_conf = None
         for my_node in utils.getNodesFromCorosyncConf():
             retval, output = utils.addLocalNode(my_node, node0, node1)
             if retval != 0:
@@ -1439,24 +1548,6 @@ def cluster_node(argv):
                 except:
                     utils.err('Unable to communicate with pcsd')
 
-            # set qdevice-net certificates if needed
-            if not utils.is_rhel6():
-                try:
-                    conf_facade = corosync_conf_facade.from_string(
-                        corosync_conf
-                    )
-                    qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings()
-                    if qdevice_model == "net":
-                        _add_device_model_net(
-                            lib_env,
-                            qdevice_model_options["host"],
-                            conf_facade.get_cluster_name(),
-                            [node_addr],
-                            skip_offline_nodes=False
-                        )
-                except LibraryError as e:
-                    process_library_reports(e.args)
-
             print("Setting up corosync...")
             utils.setCorosyncConfig(node0, corosync_conf)
             if "--enable" in utils.pcs_options:
@@ -1515,9 +1606,16 @@ def cluster_node(argv):
                 )
             # else the node seems to be stopped already, we're ok to proceed
 
+        try:
+            _ensure_cluster_is_offline_if_atb_should_be_enabled(
+                lib_env, -1, modifiers["skip_offline_nodes"]
+            )
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
+
         nodesRemoved = False
         c_nodes = utils.getNodesFromCorosyncConf()
-        destroy_cluster([node0])
+        destroy_cluster([node0], keep_going=("--force" in utils.pcs_options))
         for my_node in c_nodes:
             if my_node == node0:
                 continue
@@ -1886,60 +1984,13 @@ def cluster_remote_node(argv):
             nvpair.parentNode.removeChild(nvpair)
         dom = constraint.remove_constraints_containing_node(dom, hostname)
         utils.replace_cib_configuration(dom)
+        if not utils.usefile:
+            output, retval = utils.run([
+                "crm_node", "--force", "--remove", hostname
+            ])
+            if retval != 0:
+                utils.err("unable to remove: {0}".format(output))
     else:
         usage.cluster(["remote-node"])
         sys.exit(1)
 
-def cluster_quorum_unblock(argv):
-    if len(argv) > 0:
-        usage.quorum(["unblock"])
-        sys.exit(1)
-
-    if utils.is_rhel6():
-        utils.err("operation is not supported on CMAN clusters")
-
-    output, retval = utils.run(
-        ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"]
-    )
-    if retval != 0:
-        utils.err("unable to check quorum status")
-    if output.split("=")[-1].strip() != "1":
-        utils.err("cluster is not waiting for nodes to establish quorum")
-
-    unjoined_nodes = (
-        set(utils.getNodesFromCorosyncConf())
-        -
-        set(utils.getCorosyncActiveNodes())
-    )
-    if not unjoined_nodes:
-        utils.err("no unjoined nodes found")
-    if "--force" not in utils.pcs_options:
-        answer = utils.get_terminal_input(
-            (
-                "WARNING: If node(s) {nodes} are not powered off or they do"
-                + " have access to shared resources, data corruption and/or"
-                + " cluster failure may occur. Are you sure you want to"
-                + " continue? [y/N] "
-            ).format(nodes=", ".join(unjoined_nodes))
-        )
-        if answer.lower() not in ["y", "yes"]:
-            print("Canceled")
-            return
-    for node in unjoined_nodes:
-        stonith.stonith_confirm([node], skip_question=True)
-
-    output, retval = utils.run(
-        ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"]
-    )
-    if retval != 0:
-        utils.err("unable to cancel waiting for nodes")
-    print("Quorum unblocked")
-
-    startup_fencing = prop.get_set_properties().get("startup-fencing", "")
-    utils.set_cib_property(
-        "startup-fencing",
-        "false" if startup_fencing.lower() != "false" else "true"
-    )
-    utils.set_cib_property("startup-fencing", startup_fencing)
-    print("Waiting for nodes canceled")
-
diff --git a/pcs/common/env_file_role_codes.py b/pcs/common/env_file_role_codes.py
new file mode 100644
index 0000000..1f47387
--- /dev/null
+++ b/pcs/common/env_file_role_codes.py
@@ -0,0 +1,9 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+BOOTH_CONFIG = "BOOTH_CONFIG"
+BOOTH_KEY = "BOOTH_KEY"
diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
index 2b39938..9b05951 100644
--- a/pcs/common/report_codes.py
+++ b/pcs/common/report_codes.py
@@ -7,22 +7,56 @@ from __future__ import (
 
 # force cathegories
 FORCE_ACTIVE_RRP = "ACTIVE_RRP"
+FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE = "FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE"
+FORCE_BOOTH_DESTROY = "FORCE_BOOTH_DESTROY"
+FORCE_BOOTH_REMOVE_FROM_CIB = "FORCE_BOOTH_REMOVE_FROM_CIB"
 FORCE_CONSTRAINT_DUPLICATE = "CONSTRAINT_DUPLICATE"
 FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE"
+FORCE_FILE_OVERWRITE = "FORCE_FILE_OVERWRITE"
 FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD"
+FORCE_METADATA_ISSUE = "METADATA_ISSUE"
 FORCE_OPTIONS = "OPTIONS"
 FORCE_QDEVICE_MODEL = "QDEVICE_MODEL"
+FORCE_QDEVICE_USED = "QDEVICE_USED"
 FORCE_UNKNOWN_AGENT = "UNKNOWN_AGENT"
 FORCE_UNSUPPORTED_AGENT = "UNSUPPORTED_AGENT"
-FORCE_METADATA_ISSUE = "METADATA_ISSUE"
 SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES"
+SKIP_UNREADABLE_CONFIG = "SKIP_UNREADABLE_CONFIG"
 
 AGENT_GENERAL_ERROR = "AGENT_GENERAL_ERROR"
 AGENT_NOT_FOUND = "AGENT_NOT_FOUND"
 BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT'
+BOOTH_ADDRESS_DUPLICATION = "BOOTH_ADDRESS_DUPLICATION"
+BOOTH_ALREADY_IN_CIB = "BOOTH_ALREADY_IN_CIB"
+BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP = "BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP"
+BOOTH_CANNOT_IDENTIFY_KEYFILE = "BOOTH_CANNOT_IDENTIFY_KEYFILE"
+BOOTH_CONFIG_ACCEPTED_BY_NODE = "BOOTH_CONFIG_ACCEPTED_BY_NODE"
+BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR = "BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR"
+BOOTH_CONFIG_DISTRIBUTION_STARTED = "BOOTH_CONFIG_DISTRIBUTION_STARTED"
+BOOTH_CONFIG_FILE_ALREADY_EXISTS = "BOOTH_CONFIG_FILE_ALREADY_EXISTS"
+BOOTH_CONFIG_IO_ERROR = "BOOTH_CONFIG_IO_ERROR"
+BOOTH_CONFIG_IS_USED = "BOOTH_CONFIG_IS_USED"
+BOOTH_CONFIG_READ_ERROR = "BOOTH_CONFIG_READ_ERROR"
+BOOTH_CONFIG_UNEXPECTED_LINES = "BOOTH_CONFIG_UNEXPECTED_LINES"
+BOOTH_DAEMON_STATUS_ERROR = "BOOTH_DAEMON_STATUS_ERROR"
+BOOTH_EVEN_PEERS_NUM = "BOOTH_EVEN_PEERS_NUM"
+BOOTH_FETCHING_CONFIG_FROM_NODE = "BOOTH_FETCHING_CONFIG_FROM_NODE"
+BOOTH_INVALID_CONFIG_NAME = "BOOTH_INVALID_CONFIG_NAME"
+BOOTH_INVALID_NAME = "BOOTH_INVALID_NAME"
+BOOTH_LACK_OF_SITES = "BOOTH_LACK_OF_SITES"
+BOOTH_MULTIPLE_TIMES_IN_CIB = "BOOTH_MULTIPLE_TIMES_IN_CIB"
+BOOTH_NOT_EXISTS_IN_CIB = "BOOTH_NOT_EXISTS_IN_CIB"
+BOOTH_PEERS_STATUS_ERROR = "BOOTH_PEERS_STATUS_ERROR"
+BOOTH_SKIPPING_CONFIG = "BOOTH_SKIPPING_CONFIG"
+BOOTH_TICKET_DOES_NOT_EXIST = "BOOTH_TICKET_DOES_NOT_EXIST"
+BOOTH_TICKET_DUPLICATE = "BOOTH_TICKET_DUPLICATE"
+BOOTH_TICKET_NAME_INVALID = "BOOTH_TICKET_NAME_INVALID"
+BOOTH_TICKET_OPERATION_FAILED = "BOOTH_TICKET_OPERATION_FAILED"
+BOOTH_TICKET_STATUS_ERROR = "BOOTH_TICKET_STATUS_ERROR"
+BOOTH_UNSUPORTED_FILE_LOCATION = "BOOTH_UNSUPORTED_FILE_LOCATION"
 CIB_ALERT_NOT_FOUND = "CIB_ALERT_NOT_FOUND"
 CIB_ALERT_RECIPIENT_ALREADY_EXISTS = "CIB_ALERT_RECIPIENT_ALREADY_EXISTS"
-CIB_ALERT_RECIPIENT_NOT_FOUND = "CIB_ALERT_RECIPIENT_NOT_FOUND"
+CIB_ALERT_RECIPIENT_VALUE_INVALID = "CIB_ALERT_RECIPIENT_VALUE_INVALID"
 CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION"
 CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT"
 CIB_LOAD_ERROR = "CIB_LOAD_ERROR"
@@ -37,6 +71,7 @@ CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED'
 CMAN_UNSUPPORTED_COMMAND = "CMAN_UNSUPPORTED_COMMAND"
 COMMON_ERROR = 'COMMON_ERROR'
 COMMON_INFO = 'COMMON_INFO'
+LIVE_ENVIRONMENT_REQUIRED = "LIVE_ENVIRONMENT_REQUIRED"
 COROSYNC_CONFIG_ACCEPTED_BY_NODE = "COROSYNC_CONFIG_ACCEPTED_BY_NODE"
 COROSYNC_CONFIG_DISTRIBUTION_STARTED = "COROSYNC_CONFIG_DISTRIBUTION_STARTED"
 COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR = "COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR"
@@ -46,12 +81,16 @@ COROSYNC_NOT_RUNNING_CHECK_STARTED = "COROSYNC_NOT_RUNNING_CHECK_STARTED"
 COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR"
 COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE"
 COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE"
+COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD = "COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD"
 COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR"
 COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR"
 COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE"
 CRM_MON_ERROR = "CRM_MON_ERROR"
 DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST"
 EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST"
+FILE_ALREADY_EXISTS = "FILE_ALREADY_EXISTS"
+FILE_DOES_NOT_EXIST = "FILE_DOES_NOT_EXIST"
+FILE_IO_ERROR = "FILE_IO_ERROR"
 ID_ALREADY_EXISTS = 'ID_ALREADY_EXISTS'
 ID_NOT_FOUND = 'ID_NOT_FOUND'
 IGNORED_CMAN_UNSUPPORTED_OPTION = 'IGNORED_CMAN_UNSUPPORTED_OPTION'
@@ -95,8 +134,10 @@ QDEVICE_INITIALIZATION_ERROR = "QDEVICE_INITIALIZATION_ERROR"
 QDEVICE_INITIALIZATION_SUCCESS = "QDEVICE_INITIALIZATION_SUCCESS"
 QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED"
 QDEVICE_NOT_INITIALIZED = "QDEVICE_NOT_INITIALIZED"
+QDEVICE_NOT_RUNNING = "QDEVICE_NOT_RUNNING"
 QDEVICE_CLIENT_RELOAD_STARTED = "QDEVICE_CLIENT_RELOAD_STARTED"
 QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED"
+QDEVICE_USED_BY_CLUSTERS = "QDEVICE_USED_BY_CLUSTERS"
 REQUIRED_OPTION_IS_MISSING = "REQUIRED_OPTION_IS_MISSING"
 RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR"
 RESOURCE_CLEANUP_TOO_TIME_CONSUMING = 'RESOURCE_CLEANUP_TOO_TIME_CONSUMING'
@@ -117,6 +158,7 @@ SBD_DISABLING_STARTED = "SBD_DISABLING_STARTED"
 SBD_ENABLING_STARTED = "SBD_ENABLING_STARTED"
 SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED"
 SBD_NOT_ENABLED = "SBD_NOT_ENABLED"
+SBD_REQUIRES_ATB = "SBD_REQUIRES_ATB"
 SERVICE_DISABLE_ERROR = "SERVICE_DISABLE_ERROR"
 SERVICE_DISABLE_STARTED = "SERVICE_DISABLE_STARTED"
 SERVICE_DISABLE_SUCCESS = "SERVICE_DISABLE_SUCCESS"
@@ -133,10 +175,14 @@ SERVICE_START_SUCCESS = "SERVICE_START_SUCCESS"
 SERVICE_STOP_ERROR = "SERVICE_STOP_ERROR"
 SERVICE_STOP_STARTED = "SERVICE_STOP_STARTED"
 SERVICE_STOP_SUCCESS = "SERVICE_STOP_SUCCESS"
+UNABLE_TO_DETERMINE_USER_UID = "UNABLE_TO_DETERMINE_USER_UID"
+UNABLE_TO_DETERMINE_GROUP_GID = "UNABLE_TO_DETERMINE_GROUP_GID"
 UNABLE_TO_GET_AGENT_METADATA = 'UNABLE_TO_GET_AGENT_METADATA'
 UNABLE_TO_READ_COROSYNC_CONFIG = "UNABLE_TO_READ_COROSYNC_CONFIG"
 UNABLE_TO_GET_SBD_CONFIG = "UNABLE_TO_GET_SBD_CONFIG"
 UNABLE_TO_GET_SBD_STATUS = "UNABLE_TO_GET_SBD_STATUS"
 UNKNOWN_COMMAND = 'UNKNOWN_COMMAND'
 UNSUPPORTED_AGENT = 'UNSUPPORTED_AGENT'
+WATCHDOG_INVALID = "WATCHDOG_INVALID"
+UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS = "UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS"
 WATCHDOG_NOT_FOUND = "WATCHDOG_NOT_FOUND"
diff --git a/pcs/common/test/__init__.py b/pcs/common/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/common/tools.py b/pcs/common/tools.py
index f4f6c4b..01194a5 100644
--- a/pcs/common/tools.py
+++ b/pcs/common/tools.py
@@ -33,3 +33,11 @@ def run_parallel(worker, data_list):
 
     for thread in thread_list:
         thread.join()
+
+def format_environment_error(e):
+    if e.filename:
+        return "{0}: '{1}'".format(e.strerror, e.filename)
+    return e.strerror
+
+def join_multilines(strings):
+    return "\n".join([a.strip() for a in strings if a.strip()])
diff --git a/pcs/config.py b/pcs/config.py
index 4659c5b..e410a5a 100644
--- a/pcs/config.py
+++ b/pcs/config.py
@@ -18,8 +18,10 @@ import logging
 import pwd
 import grp
 import time
+import platform
 
 try:
+    import clufter.facts
     import clufter.format_manager
     import clufter.filter_manager
     import clufter.command_manager
@@ -94,8 +96,23 @@ def config_show(argv):
     status.nodes_status(["config"])
     print()
     config_show_cib()
-    cluster.cluster_uidgid([], True)
-    if "--corosync_conf" in utils.pcs_options or not utils.is_rhel6():
+    if (
+        utils.hasCorosyncConf()
+        and
+        (
+            utils.is_rhel6()
+            or
+            (not utils.usefile and "--corosync_conf" not in utils.pcs_options)
+        )
+    ):
+        # with corosync 1 and cman, uid gid is part of cluster.conf file
+        # with corosync 2, uid gid is in a separate directory
+        cluster.cluster_uidgid([], True)
+    if (
+        "--corosync_conf" in utils.pcs_options
+        or
+        (not utils.is_rhel6() and utils.hasCorosyncConf())
+    ):
         print()
         print("Quorum:")
         try:
@@ -113,8 +130,8 @@ def config_show_cib():
     print("Stonith Devices:")
     resource.resource_show([], True)
     print("Fencing Levels:")
-    print()
     stonith.stonith_level_show()
+    print()
 
     lib = utils.get_library_wrapper()
     constraint.location_show([])
@@ -260,7 +277,16 @@ def config_restore_remote(infile_name, infile_obj):
                 err_msgs.append(output)
                 continue
             status = json.loads(output)
-            if status["corosync"] or status["pacemaker"] or status["cman"]:
+            if (
+                status["corosync"]
+                or
+                status["pacemaker"]
+                or
+                status["cman"]
+                or
+                # not supported by older pcsd, do not fail if not present
+                status.get("pacemaker_remote", False)
+            ):
                 err_msgs.append(
                     "Cluster is currently running on node %s. You need to stop "
                         "the cluster in order to restore the configuration."
@@ -279,7 +305,7 @@ def config_restore_remote(infile_name, infile_obj):
     # If node returns HTTP 404 it does not support config syncing at all.
     for node in node_list:
         retval, output = utils.pauseConfigSyncing(node, 10 * 60)
-        if not (retval == 0 or output.endswith("(HTTP error: 404)")):
+        if not (retval == 0 or "(HTTP error: 404)" in output):
             utils.err(output)
 
     if infile_obj:
@@ -299,11 +325,13 @@ def config_restore_remote(infile_name, infile_obj):
 
 def config_restore_local(infile_name, infile_obj):
     if (
-        status.is_cman_running()
+        status.is_service_running("cman")
+        or
+        status.is_service_running("corosync")
         or
-        status.is_corosyc_running()
+        status.is_service_running("pacemaker")
         or
-        status.is_pacemaker_running()
+        status.is_service_running("pacemaker_remote")
     ):
         utils.err(
             "Cluster is currently running on this node. You need to stop "
@@ -529,6 +557,7 @@ def config_import_cman(argv):
     cluster_conf = settings.cluster_conf_file
     dry_run_output = None
     output_format = "cluster.conf" if utils.is_rhel6() else "corosync.conf"
+    dist = None
     invalid_args = False
     for arg in argv:
         if "=" in arg:
@@ -545,6 +574,8 @@ def config_import_cman(argv):
                     output_format = value
                 else:
                     invalid_args = True
+            elif name == "dist":
+                dist = value
             else:
                 invalid_args = True
         else:
@@ -562,12 +593,34 @@ def config_import_cman(argv):
     force = "--force" in utils.pcs_options
     interactive = "--interactive" in utils.pcs_options
 
+    if dist is not None:
+        if output_format == "cluster.conf":
+            if not clufter.facts.cluster_pcs_flatiron("linux", dist.split(",")):
+                utils.err("dist does not match output-format")
+        elif output_format == "corosync.conf":
+            if not clufter.facts.cluster_pcs_needle("linux", dist.split(",")):
+                utils.err("dist does not match output-format")
+    elif (
+        (output_format == "cluster.conf" and utils.is_rhel6())
+        or
+        (output_format == "corosync.conf" and not utils.is_rhel6())
+    ):
+        dist = ",".join(platform.linux_distribution(full_distribution_name=0))
+    elif output_format == "cluster.conf":
+        dist = "redhat,6.7,Santiago"
+    elif output_format == "corosync.conf":
+        dist = "redhat,7.1,Maipo"
+    else:
+        # for output-format=pcs-command[-verbose]
+        dist = ",".join(platform.linux_distribution(full_distribution_name=0))
+
     clufter_args = {
         "input": str(cluster_conf),
         "cib": {"passin": "bytestring"},
         "nocheck": force,
         "batch": True,
         "sys": "linux",
+        "dist": dist,
         # Make it work on RHEL6 as well for sure
         "color": "always" if sys.stdout.isatty() else "never"
     }
@@ -580,11 +633,9 @@ def config_import_cman(argv):
         logging.getLogger("clufter").setLevel(logging.DEBUG)
     if output_format == "cluster.conf":
         clufter_args["ccs_pcmk"] = {"passin": "bytestring"}
-        clufter_args["dist"] = "redhat,6.7,Santiago"
         cmd_name = "ccs2pcs-flatiron"
     elif output_format == "corosync.conf":
         clufter_args["coro"] = {"passin": "struct"}
-        clufter_args["dist"] = "redhat,7.1,Maipo"
         cmd_name = "ccs2pcs-needle"
     elif output_format in ("pcs-commands", "pcs-commands-verbose"):
         clufter_args["output"] = {"passin": "bytestring"}
@@ -598,7 +649,15 @@ def config_import_cman(argv):
             clufter_args["text_width"] = "-1"
             clufter_args["silent"] = False
             clufter_args["noguidance"] = False
-        cmd_name = "ccs2pcscmd-flatiron"
+        if clufter.facts.cluster_pcs_flatiron("linux", dist.split(",")):
+            cmd_name = "ccs2pcscmd-flatiron"
+        elif clufter.facts.cluster_pcs_needle("linux", dist.split(",")):
+            cmd_name = "ccs2pcscmd-needle"
+        else:
+            utils.err(
+                "unrecognized dist, try something recognized"
+                + " (e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty)"
+            )
     clufter_args_obj = type(str("ClufterOptions"), (object, ), clufter_args)
 
     # run convertor
@@ -711,29 +770,36 @@ def config_export_pcs_commands(argv, verbose=False):
     interactive = "--interactive" in utils.pcs_options
     invalid_args = False
     output_file = None
+    dist = None
     for arg in argv:
         if "=" in arg:
             name, value = arg.split("=", 1)
             if name == "output":
                 output_file = value
+            elif name == "dist":
+                dist = value
             else:
                 invalid_args = True
         else:
             invalid_args = True
-    if invalid_args or not output_file:
+    # check options
+    if invalid_args:
         usage.config(["export", "pcs-commands"])
         sys.exit(1)
+    # complete optional options
+    if dist is None:
+        dist = ",".join(platform.linux_distribution(full_distribution_name=0))
 
     # prepare convertor options
     clufter_args = {
         "nocheck": force,
         "batch": True,
         "sys": "linux",
+        "dist": dist,
         # Make it work on RHEL6 as well for sure
         "color": "always" if sys.stdout.isatty() else "never",
         "coro": settings.corosync_conf_file,
         "ccs": settings.cluster_conf_file,
-        "output": {"passin": "bytestring"},
         "start_wait": "60",
         "tmp_cib": "tmp-cib.xml",
         "force": force,
@@ -741,6 +807,10 @@ def config_export_pcs_commands(argv, verbose=False):
         "silent": True,
         "noguidance": True,
     }
+    if output_file:
+        clufter_args["output"] = {"passin": "bytestring"}
+    else:
+        clufter_args["output"] = "-"
     if interactive:
         if "EDITOR" not in os.environ:
             utils.err("$EDITOR environment variable is not set")
@@ -765,13 +835,14 @@ def config_export_pcs_commands(argv, verbose=False):
         "Error: unable to export cluster configuration"
     )
 
-    # save commands
-    ok, message = utils.write_file(
-        output_file,
-        clufter_args_obj.output["passout"]
-    )
-    if not ok:
-        utils.err(message)
+    # save commands if not printed to stdout by clufter
+    if output_file:
+        ok, message = utils.write_file(
+            output_file,
+            clufter_args_obj.output["passout"]
+        )
+        if not ok:
+            utils.err(message)
 
 def run_clufter(cmd_name, cmd_args, debug, force, err_prefix):
     try:
diff --git a/pcs/constraint.py b/pcs/constraint.py
index 5d9b0df..d8415b6 100644
--- a/pcs/constraint.py
+++ b/pcs/constraint.py
@@ -90,6 +90,7 @@ def constraint_cmd(argv):
             command_map = {
                 "set": ticket_command.create_with_set,
                 "add": ticket_command.add,
+                "remove": ticket_command.remove,
                 "show": ticket_command.show,
             }
             sub_command = argv[0] if argv else "show"
@@ -593,7 +594,7 @@ def location_show(argv):
             print("  Node: " + node)
 
             nodehash_label = (
-                (nodehashon, "    Allowed to run:")
+                (nodehashon, "    Allowed to run:"),
                 (nodehashoff, "    Not allowed to run:")
             )
             for nodehash, label in nodehash_label:
diff --git a/pcs/lib/booth/__init__.py b/pcs/lib/booth/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/booth/config_exchange.py b/pcs/lib/booth/config_exchange.py
new file mode 100644
index 0000000..377af1d
--- /dev/null
+++ b/pcs/lib/booth/config_exchange.py
@@ -0,0 +1,28 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+from pcs.lib.booth.config_structure import ConfigItem
+
+def to_exchange_format(booth_configuration):
+    return [
+        {
+            "key": item.key,
+            "value": item.value,
+            "details": to_exchange_format(item.details),
+        }
+        for item in booth_configuration
+    ]
+
+
+def from_exchange_format(exchange_format):
+    return [
+        ConfigItem(
+            item["key"],
+            item["value"],
+            from_exchange_format(item["details"]),
+        )
+        for item in exchange_format
+    ]
diff --git a/pcs/lib/booth/config_files.py b/pcs/lib/booth/config_files.py
new file mode 100644
index 0000000..7b91379
--- /dev/null
+++ b/pcs/lib/booth/config_files.py
@@ -0,0 +1,104 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os
+import binascii
+
+from pcs.common import report_codes, env_file_role_codes as file_roles
+from pcs.common.tools import format_environment_error
+from pcs.lib import reports as lib_reports
+from pcs.lib.booth import reports
+from pcs.lib.errors import ReportItemSeverity
+from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR
+
+
+def generate_key():
+    return binascii.hexlify(os.urandom(32))
+
+def get_all_configs_file_names():
+    """
+    Returns list of all file names ending with '.conf' in booth configuration
+    directory.
+    """
+    if not os.path.isdir(BOOTH_CONFIG_DIR):
+        return []
+    return [
+        file_name
+        for file_name in os.listdir(BOOTH_CONFIG_DIR)
+        if
+            file_name.endswith(".conf")
+            and
+            len(file_name) > len(".conf")
+            and
+            os.path.isfile(os.path.join(BOOTH_CONFIG_DIR, file_name))
+    ]
+
+
+def _read_config(file_name):
+    """
+    Read specified booth config from default booth config directory.
+
+    file_name -- string, name of file
+    """
+    with open(os.path.join(BOOTH_CONFIG_DIR, file_name), "r") as file:
+        return file.read()
+
+
+def read_configs(reporter, skip_wrong_config=False):
+    """
+    Returns content of all configs present on local system in dictionary,
+    where key is name of config and value is its content.
+
+    reporter -- report processor
+    skip_wrong_config -- if True skip local configs that are unreadable
+    """
+    report_list = []
+    output = {}
+    for file_name in get_all_configs_file_names():
+        try:
+            output[file_name] = _read_config(file_name)
+        except EnvironmentError:
+            report_list.append(reports.booth_config_read_error(
+                file_name,
+                (
+                    ReportItemSeverity.WARNING if skip_wrong_config
+                    else ReportItemSeverity.ERROR
+                ),
+                (
+                    None if skip_wrong_config
+                    else report_codes.SKIP_UNREADABLE_CONFIG
+                )
+            ))
+    reporter.process_list(report_list)
+    return output
+
+
+def read_authfile(reporter, path):
+    """
+    Returns content of specified authfile as bytes. None if file is not in
+    default booth directory or there was some IO error.
+
+    reporter -- report processor
+    path -- path to the authfile to be read
+    """
+    if not path:
+        return None
+    if os.path.dirname(os.path.abspath(path)) != BOOTH_CONFIG_DIR:
+        reporter.process(reports.booth_unsupported_file_location(path))
+        return None
+    try:
+        with open(path, "rb") as file:
+            return file.read()
+    except EnvironmentError as e:
+        reporter.process(lib_reports.file_io_error(
+            file_roles.BOOTH_KEY,
+            path,
+            reason=format_environment_error(e),
+            operation="read",
+            severity=ReportItemSeverity.WARNING
+        ))
+        return None
diff --git a/pcs/lib/booth/config_parser.py b/pcs/lib/booth/config_parser.py
new file mode 100644
index 0000000..bdc79fd
--- /dev/null
+++ b/pcs/lib/booth/config_parser.py
@@ -0,0 +1,91 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import re
+
+from pcs.lib.booth import config_structure, reports
+from pcs.lib.errors import LibraryError
+
+
+class InvalidLines(Exception):
+    pass
+
+def parse(content):
+    try:
+        return organize_lines(parse_to_raw_lines(content))
+    except InvalidLines as e:
+        raise LibraryError(
+            reports.booth_config_unexpected_lines(e.args[0])
+        )
+
+def build(config_line_list):
+    newline = [""]
+    return "\n".join(build_to_lines(config_line_list) + newline)
+
+def build_to_lines(config_line_list, deep=0):
+    line_list = []
+    for key, value, details in config_line_list:
+        line_value = value if key != "ticket" else '"{0}"'.format(value)
+        line_list.append("{0}{1} = {2}".format("  "*deep, key, line_value))
+        if details:
+            line_list.extend(build_to_lines(details, deep+1))
+    return line_list
+
+
+def organize_lines(raw_line_list):
+    # Decision: a global key is moved up when it appears below a ticket. The
+    # alternative is to move it below all ticket details, but that is confusing.
+    global_section = []
+    ticket_section = []
+    current_ticket = None
+    for key, value in raw_line_list:
+        if key == "ticket":
+            current_ticket = config_structure.ConfigItem(key, value)
+            ticket_section.append(current_ticket)
+        elif key in config_structure.GLOBAL_KEYS or not current_ticket:
+            global_section.append(config_structure.ConfigItem(key, value))
+        else:
+            current_ticket.details.append(
+                config_structure.ConfigItem(key, value)
+            )
+
+    return global_section + ticket_section
+
+def search_with_multiple_re(re_object_list, string):
+    """
+    return MatchObject of first matching regular expression object or None
+    list re_object_list contains regular expression objects (products of
+        re.compile)
+    """
+    for expression in re_object_list:
+        match = expression.search(string)
+        if match:
+            return match
+    return None
+
+def parse_to_raw_lines(config_content):
+    keyword_part = r"^(?P<key>[a-zA-Z0-9_-]+)\s*=\s*"
+    expression_list = [re.compile(pattern.format(keyword_part)) for pattern in [
+        r"""{0}(?P<value>[^'"]+)$""",
+        r"""{0}'(?P<value>[^']*)'\s*(#.*)?$""",
+        r"""{0}"(?P<value>[^"]*)"\s*(#.*)?$""",
+    ]]
+
+    line_list = []
+    invalid_line_list = []
+    for line in config_content.splitlines():
+        line = line.strip()
+        match = search_with_multiple_re(expression_list, line)
+        if match:
+            line_list.append((match.group("key"), match.group("value")))
+        elif line and not line.startswith("#"):
+            invalid_line_list.append(line)
+
+    if invalid_line_list:
+        raise InvalidLines(invalid_line_list)
+
+    return line_list
diff --git a/pcs/lib/booth/config_structure.py b/pcs/lib/booth/config_structure.py
new file mode 100644
index 0000000..09ff1a7
--- /dev/null
+++ b/pcs/lib/booth/config_structure.py
@@ -0,0 +1,161 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import re
+
+import pcs.lib.reports as common_reports
+from pcs.lib.booth import reports
+from pcs.lib.errors import LibraryError, ReportItemSeverity as severities
+from pcs.common import report_codes
+from collections import namedtuple
+
+#keys allowed in the global (top) section of a booth config; "ticket" is
+#listed here because a "ticket" key opens a new ticket section
+GLOBAL_KEYS = (
+    "transport",
+    "port",
+    "name",
+    "authfile",
+    "maxtimeskew",
+    "site",
+    "arbitrator",
+    "site-user",
+    "site-group",
+    "arbitrator-user",
+    "arbitrator-group",
+    "debug",
+    "ticket",
+)
+#keys allowed inside a ticket section
+TICKET_KEYS = (
+    "acquire-after",
+    "attr-prereq",
+    "before-acquire-handler",
+    "expire",
+    "renewal-freq",
+    "retries",
+    "timeout",
+    "weights",
+)
+
+class ConfigItem(namedtuple("ConfigItem", "key value details")):
+    #One "key = value" line of a booth config; details holds the nested
+    #ConfigItems of a ticket section (empty for plain items).
+    def __new__(cls, key, value, details=None):
+        #default is None, not []: a fresh list per instance avoids the
+        #shared-mutable-default pitfall
+        details = details if details else []
+        return super(ConfigItem, cls).__new__(cls, key, value, details)
+
+def validate_peers(site_list, arbitrator_list):
+    """
+    Validate booth peers; raise LibraryError with all problems found.
+
+    Checks that there are at least 2 sites, that the total number of peers
+    (sites + arbitrators) is odd and that no address is entered twice.
+
+    list site_list -- site addresses
+    list arbitrator_list -- arbitrator addresses
+    """
+    report = []
+
+    if len(site_list) < 2:
+        report.append(reports.booth_lack_of_sites(site_list))
+
+    peer_list = site_list + arbitrator_list
+
+    if len(peer_list) % 2 == 0:
+        report.append(reports.booth_even_peers_num(len(peer_list)))
+
+    address_set = set()
+    duplicate_addresses = set()
+    for address in peer_list:
+        if address in address_set:
+            duplicate_addresses.add(address)
+        else:
+            address_set.add(address)
+    if duplicate_addresses:
+        report.append(reports.booth_address_duplication(duplicate_addresses))
+
+    if report:
+        raise LibraryError(*report)
+
+def take_peers(booth_configuration):
+    """
+    Return the tuple (site address list, arbitrator address list) taken from
+    the parsed booth configuration.
+    """
+    return (
+        pick_list_by_key(booth_configuration, "site"),
+        pick_list_by_key(booth_configuration, "arbitrator"),
+    )
+
+def pick_list_by_key(booth_configuration, key):
+    #values of all config items with the given key, in original order
+    return [item.value for item in booth_configuration if item.key == key]
+
+def remove_ticket(booth_configuration, ticket_name):
+    """
+    Return a new configuration without the named ticket (and its details).
+    Raise LibraryError when the ticket does not exist.
+    """
+    validate_ticket_exists(booth_configuration, ticket_name)
+    return [
+        config_item for config_item in booth_configuration
+        if config_item.key != "ticket" or config_item.value != ticket_name
+    ]
+
+def add_ticket(
+    report_processor, booth_configuration, ticket_name, options,
+    allow_unknown_options
+):
+    """
+    Return a new configuration with the validated ticket appended.
+
+    report_processor -- collects validation report items
+    list booth_configuration -- parsed config (ConfigItem list)
+    string ticket_name -- name of the new ticket
+    dict options -- ticket option name: value
+    bool allow_unknown_options -- turn unknown-option errors into warnings
+    """
+    validate_ticket_name(ticket_name)
+    validate_ticket_unique(booth_configuration, ticket_name)
+    validate_ticket_options(report_processor, options, allow_unknown_options)
+    return booth_configuration + [
+        ConfigItem("ticket", ticket_name, [
+            ConfigItem(key, value) for key, value in options.items()
+        ])
+    ]
+
+def validate_ticket_exists(booth_configuration, ticket_name):
+    #raise when the named ticket is not in the configuration
+    if not ticket_exists(booth_configuration, ticket_name):
+        raise LibraryError(reports.booth_ticket_does_not_exist(ticket_name))
+
+def validate_ticket_unique(booth_configuration, ticket_name):
+    #raise when a ticket of the same name is already in the configuration
+    if ticket_exists(booth_configuration, ticket_name):
+        raise LibraryError(reports.booth_ticket_duplicate(ticket_name))
+
+def validate_ticket_options(report_processor, options, allow_unknown_options):
+    reports = []
+    for key in sorted(options):
+        if key in GLOBAL_KEYS:
+            reports.append(
+                common_reports.invalid_option(key, TICKET_KEYS, "booth ticket")
+            )
+
+        elif key not in TICKET_KEYS:
+            reports.append(
+                common_reports.invalid_option(
+                    key, TICKET_KEYS,
+                    "booth ticket",
+                    severity=(
+                        severities.WARNING if allow_unknown_options
+                        else severities.ERROR
+                    ),
+                    forceable=(
+                        None if allow_unknown_options
+                        else report_codes.FORCE_OPTIONS
+                    ),
+                )
+            )
+
+        if not options[key].strip():
+            reports.append(common_reports.invalid_option_value(
+                key,
+                options[key],
+                "no-empty",
+            ))
+
+    report_processor.process_list(reports)
+
+def ticket_exists(booth_configuration, ticket_name):
+    #True when a "ticket" item with the given name is in the configuration
+    return any(
+        value for key, value, _ in booth_configuration
+        if key == "ticket" and value == ticket_name
+    )
+
+def validate_ticket_name(ticket_name):
+    #ticket names may contain only word characters ([a-zA-Z0-9_]) and dashes
+    if not re.compile(r"^[\w-]+$").search(ticket_name):
+        raise LibraryError(reports.booth_ticket_name_invalid(ticket_name))
+
+def set_authfile(booth_configuration, auth_file):
+    """
+    Return a new configuration with "authfile" set to auth_file; any previous
+    authfile items are dropped and the new one is placed first.
+    """
+    return [ConfigItem("authfile", auth_file)] + [
+        config_item for config_item in booth_configuration
+        if config_item.key != "authfile"
+    ]
+
+def get_authfile(booth_configuration):
+    #value of the last "authfile" item, or None when authfile is not set
+    for key, value, _ in reversed(booth_configuration):
+        if key == "authfile":
+            return value
+    return None
diff --git a/pcs/lib/booth/env.py b/pcs/lib/booth/env.py
new file mode 100644
index 0000000..57d47aa
--- /dev/null
+++ b/pcs/lib/booth/env.py
@@ -0,0 +1,149 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os
+import pwd
+import grp
+
+from pcs import settings
+from pcs.common import env_file_role_codes
+from pcs.common.tools import format_environment_error
+from pcs.lib import reports as common_reports
+from pcs.lib.booth import reports
+from pcs.lib.env_file import GhostFile, RealFile
+from pcs.lib.errors import LibraryError
+from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR
+
+
+def get_booth_env_file_name(name, extension):
+    """
+    Return the path "<BOOTH_CONFIG_DIR>/<name>.<extension>".
+    Raise LibraryError when name is not usable as a file name.
+    """
+    report_list = []
+    if "/" in name:
+        report_list.append(
+            reports.booth_invalid_name(name, "contains illegal character '/'")
+        )
+    if report_list:
+        raise LibraryError(*report_list)
+    return "{0}.{1}".format(os.path.join(BOOTH_CONFIG_DIR, name), extension)
+
+def get_config_file_name(name):
+    #standard path of the config file of booth instance "name"
+    return get_booth_env_file_name(name, "conf")
+
+def get_key_path(name):
+    #standard path of the key (authfile) of booth instance "name"
+    return get_booth_env_file_name(name, "key")
+
+def report_keyfile_io_error(file_path, operation, e):
+    """
+    Return (not raise) a LibraryError describing a failed I/O operation on
+    the booth key file.
+
+    string file_path -- path of the key file
+    string operation -- e.g. "chown", "chmod"
+    EnvironmentError e -- the original error
+    """
+    return LibraryError(common_reports.file_io_error(
+        file_role=env_file_role_codes.BOOTH_KEY,
+        file_path=file_path,
+        operation=operation,
+        reason=format_environment_error(e)
+    ))
+
+def set_keyfile_access(file_path):
+    """
+    Make the booth key file owned by the pacemaker user/group and readable
+    and writable by the owner only (mode 0600).
+    Raise LibraryError when the user/group cannot be resolved or the
+    chown/chmod fails.
+    """
+    #shutil.chown is not in python2
+    try:
+        uid = pwd.getpwnam(settings.pacemaker_uname).pw_uid
+    except KeyError:
+        raise LibraryError(common_reports.unable_to_determine_user_uid(
+            settings.pacemaker_uname
+        ))
+    try:
+        gid = grp.getgrnam(settings.pacemaker_gname).gr_gid
+    except KeyError:
+        raise LibraryError(common_reports.unable_to_determine_group_gid(
+            settings.pacemaker_gname
+        ))
+    try:
+        os.chown(file_path, uid, gid)
+    except EnvironmentError as e:
+        raise report_keyfile_io_error(file_path, "chown", e)
+    try:
+        os.chmod(file_path, 0o600)
+    except EnvironmentError as e:
+        raise report_keyfile_io_error(file_path, "chmod", e)
+
+class BoothEnv(object):
+    """
+    Access to the config file and key file of one booth instance, either as
+    real files in the standard booth config directory or as in-memory
+    "ghost" files whose content is supplied by the caller.
+    """
+    def __init__(self, report_processor, env_data):
+        """
+        report_processor -- collects report items
+        dict env_data -- must contain "name"; when it also contains
+            "config_file" (and then "key_file" and "key_path"), ghost files
+            built from the given content are used instead of the real files
+            of instance "name"
+        """
+        self.__report_processor = report_processor
+        self.__name = env_data["name"]
+        if "config_file" in env_data:
+            #ghost files: content comes from the caller, nothing is read
+            #from or written to the filesystem
+            self.__config = GhostFile(
+                file_role=env_file_role_codes.BOOTH_CONFIG,
+                content=env_data["config_file"]["content"]
+            )
+            self.__key_path = env_data["key_path"]
+            self.__key = GhostFile(
+                file_role=env_file_role_codes.BOOTH_KEY,
+                content=env_data["key_file"]["content"]
+            )
+        else:
+            self.__config = RealFile(
+                file_role=env_file_role_codes.BOOTH_CONFIG,
+                file_path=get_config_file_name(env_data["name"]),
+            )
+            self.__set_key_path(get_key_path(env_data["name"]))
+
+    def __set_key_path(self, path):
+        #keep the stored path and the RealFile object in sync
+        self.__key_path = path
+        self.__key = RealFile(
+            file_role=env_file_role_codes.BOOTH_KEY,
+            file_path=path,
+        )
+
+    def command_expect_live_env(self):
+        #raise when ghost files are in use but the command needs real files
+        if not self.__config.is_live:
+            raise LibraryError(common_reports.live_environment_required([
+                "--booth-conf",
+                "--booth-key",
+            ]))
+
+    def set_key_path(self, path):
+        #changing the key path makes no sense for caller-supplied content
+        if not self.__config.is_live:
+            raise AssertionError(
+                "Set path of keyfile is supported only in live environment"
+            )
+        self.__set_key_path(path)
+
+    @property
+    def name(self):
+        #booth instance name
+        return self.__name
+
+    @property
+    def key_path(self):
+        #path of the key (authfile) of this instance
+        return self.__key_path
+
+    def get_config_content(self):
+        return self.__config.read()
+
+    def create_config(self, content, can_overwrite_existing=False):
+        #refuse to overwrite an existing config unless explicitly allowed
+        self.__config.assert_no_conflict_with_existing(
+            self.__report_processor,
+            can_overwrite_existing
+        )
+        self.__config.write(content)
+
+    def create_key(self, key_content, can_overwrite_existing=False):
+        self.__key.assert_no_conflict_with_existing(
+            self.__report_processor,
+            can_overwrite_existing
+        )
+        #set_keyfile_access makes the new file pacemaker-owned, mode 0600
+        self.__key.write(key_content, set_keyfile_access, is_binary=True)
+
+    def push_config(self, content):
+        #overwrite the config unconditionally
+        self.__config.write(content)
+
+    def remove_key(self):
+        self.__key.remove(silence_no_existence=True)
+
+    def remove_config(self):
+        self.__config.remove()
+
+    def export(self):
+        #ghost file content for the caller; empty for a live environment
+        return {} if self.__config.is_live else {
+            "config_file": self.__config.export(),
+            "key_file": self.__key.export(),
+        }
diff --git a/pcs/lib/booth/reports.py b/pcs/lib/booth/reports.py
new file mode 100644
index 0000000..6aa9d3d
--- /dev/null
+++ b/pcs/lib/booth/reports.py
@@ -0,0 +1,418 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItem, ReportItemSeverity
+
+
+def booth_lack_of_sites(site_list):
+    """
+    Less than 2 booth sites were entered, which does not make sense.
+    list site_list -- the currently entered sites
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_LACK_OF_SITES,
+        "lack of sites for booth configuration (need 2 at least):"
+            " sites {sites_string}"
+        ,
+        info={
+            "sites": site_list,
+            "sites_string": ", ".join(site_list) if site_list else "missing",
+        }
+    )
+
+def booth_even_peers_num(number):
+    """
+    Booth requires an odd number of peers, but an even number was entered.
+    integer number -- how many peers were entered
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_EVEN_PEERS_NUM,
+        "odd number of peers is required (entered {number} peers)",
+        info={
+            "number": number,
+        }
+    )
+
+def booth_address_duplication(duplicate_addresses):
+    """
+    The address of each peer must be unique, but duplicates were found.
+    set duplicate_addresses -- addresses entered multiple times
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_ADDRESS_DUPLICATION,
+        "duplicate address for booth configuration: {addresses_string}"
+        ,
+        info={
+            "addresses": duplicate_addresses,
+            "addresses_string": ", ".join(duplicate_addresses),
+        }
+    )
+
+def booth_config_unexpected_lines(line_list):
+    """
+    Booth config have defined structure. But line out of structure definition
+        appeared.
+    list line_list contains lines out of defined structure
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_CONFIG_UNEXPECTED_LINES,
+        "unexpected line appeard in config: \n{lines_string}",
+        info={
+            "line_list": line_list,
+            "lines_string": "\n".join(line_list)
+        }
+    )
+
+def booth_invalid_name(name, reason):
+    """
+    A booth instance name must follow some rules (e.g. it cannot contain
+    the illegal character '/'); one of the rules was violated.
+    string name -- the entered booth instance name
+    string reason -- which rule was violated
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_INVALID_NAME,
+            "booth name '{name}' is not valid ({reason})"
+        ,
+        info={
+            "name": name,
+            "reason": reason,
+        }
+    )
+
+def booth_ticket_name_invalid(ticket_name):
+    """
+    A booth ticket name may consist only of alphanumeric characters or
+    dashes; the entered name violates this rule.
+    string ticket_name -- the entered booth ticket name
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_TICKET_NAME_INVALID,
+        "booth ticket name '{ticket_name}' is not valid,"
+            " use alphanumeric chars or dash"
+        ,
+        info={
+            "ticket_name": ticket_name,
+        }
+    )
+
+def booth_ticket_duplicate(ticket_name):
+    """
+    Each booth ticket name must be unique, but a duplicate name was entered.
+    string ticket_name -- the entered booth ticket name
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_TICKET_DUPLICATE,
+        "booth ticket name '{ticket_name}' already exists in configuration",
+        info={
+            "ticket_name": ticket_name,
+        }
+    )
+
+def booth_ticket_does_not_exist(ticket_name):
+    """
+    Some operations (like ticket removal) expect the ticket to be present
+    in the booth configuration, but it was not found there.
+    string ticket_name -- the entered booth ticket name
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_TICKET_DOES_NOT_EXIST,
+        "booth ticket name '{ticket_name}' does not exist",
+        info={
+            "ticket_name": ticket_name,
+        }
+    )
+
+def booth_already_in_cib(name):
+    """
+    Each booth instance should be in the cib at most once; an existing
+    instance was detected while creating a new one.
+    string name -- booth instance name
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_ALREADY_IN_CIB,
+        "booth instance '{name}' is already created as cluster resource",
+        info={
+            "name": name,
+        }
+    )
+
+def booth_not_exists_in_cib(name):
+    """
+    Removal of a booth instance from the cib was requested, but no such
+    instance was found there.
+    string name -- booth instance name
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_NOT_EXISTS_IN_CIB,
+        "booth instance '{name}' not found in cib",
+        info={
+            "name": name,
+        }
+    )
+
+def booth_config_is_used(name, detail=""):
+    """
+    The booth config was found to be in use during a destroy request.
+    string name -- booth instance name
+    string detail -- more details (e.g. the instance is used as a cluster
+        resource or is started/enabled under systemd)
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_CONFIG_IS_USED,
+        "booth instance '{name}' is used{detail_string}",
+        info={
+            "name": name,
+            "detail": detail,
+            "detail_string": " {0}".format(detail) if detail else "",
+        }
+    )
+
+def booth_multiple_times_in_cib(
+    name, severity=ReportItemSeverity.ERROR
+):
+    """
+    Each booth instance should be in the cib at most once, but multiple
+    occurrences were detected (e.g. while removing the instance from the
+    cib). The user has to be notified about the multiple occurrences even
+    when the operation is forced.
+    string name -- booth instance name
+    ReportItemSeverity severity -- ERROR or WARNING (depending on context);
+        because severity is coupled with ReportItem, it is specified here
+    """
+    return ReportItem(
+        report_codes.BOOTH_MULTIPLE_TIMES_IN_CIB,
+        severity,
+        "found more than one booth instance '{name}' in cib",
+        info={
+            "name": name,
+        },
+        #only an ERROR is forceable; a WARNING is already the forced form
+        forceable=report_codes.FORCE_BOOTH_REMOVE_FROM_CIB
+            if severity == ReportItemSeverity.ERROR else None
+    )
+
+
+def booth_config_distribution_started():
+    """
+    Booth configuration is about to be sent to the cluster nodes.
+    """
+    return ReportItem.info(
+        report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
+        "Sending booth configuration to cluster nodes..."
+    )
+
+
+def booth_config_accepted_by_node(node=None, name_list=None):
+    """
+    Booth config has been saved on the specified node.
+
+    node -- name of the node, None to leave the node out of the message
+    name_list -- list of names of saved booth instances
+    """
+    if name_list:
+        name = ", ".join(name_list)
+        if name == "booth":
+            #the default instance is not named in the message
+            msg = "Booth config saved."
+        else:
+            msg = "Booth config(s) ({name}) saved."
+    else:
+        msg = "Booth config saved."
+        name = None
+    return ReportItem.info(
+        report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+        msg if node is None else "{node}: " + msg,
+        info={
+            "node": node,
+            "name": name,
+            "name_list": name_list
+        }
+    )
+
+
+def booth_config_distribution_node_error(node, reason, name=None):
+    """
+    Saving booth config failed on the specified node.
+
+    node -- node name
+    reason -- reason of the failure
+    name -- name of the booth instance; None or "booth" stands for the
+        default instance, which is not named in the message
+    """
+    if name and name != "booth":
+        msg = "Unable to save booth config ({name}) on node '{node}': {reason}"
+    else:
+        msg = "Unable to save booth config on node '{node}': {reason}"
+    return ReportItem.error(
+        report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR,
+        msg,
+        info={
+            "node": node,
+            "name": name,
+            "reason": reason
+        }
+    )
+
+
+def booth_config_read_error(
+    name, severity=ReportItemSeverity.ERROR, forceable=None
+):
+    """
+    Unable to read the config of the specified booth instance.
+
+    name -- name of the booth instance; None or "booth" stands for the
+        default instance, which is not named in the message
+    severity -- severity of the report item
+    forceable -- is this report item forceable? by what category?
+    """
+    if name and name != "booth":
+        msg = "Unable to read booth config ({name})."
+    else:
+        msg = "Unable to read booth config."
+    return ReportItem(
+        report_codes.BOOTH_CONFIG_READ_ERROR,
+        severity,
+        msg,
+        info={"name": name},
+        forceable=forceable
+    )
+
+
+def booth_fetching_config_from_node_started(node, config=None):
+    """
+    fetching of booth config from specified node started
+
+    node -- node from which config is fetching
+    config -- config name
+    """
+    if config or config == 'booth':
+        msg = "Fetching booth config from node '{node}'..."
+    else:
+        msg = "Fetching booth config '{config}' from node '{node}'..."
+    return ReportItem.info(
+        report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
+        msg,
+        info={
+            "node": node,
+            "config": config,
+        }
+    )
+
+
+def booth_unsupported_file_location(file):
+    """
+    The location of a booth file (config, authfile) is not supported (the
+    file is not in /etc/booth/), so the file is skipped.
+
+    file -- file path
+    """
+    #NOTE(review): the report code constant is spelled "UNSUPORTED" upstream
+    return ReportItem.warning(
+        report_codes.BOOTH_UNSUPORTED_FILE_LOCATION,
+        "skipping file {file}: unsupported file location",
+        info={"file": file}
+    )
+
+
+def booth_daemon_status_error(reason):
+    """
+    Unable to get the status of the booth daemon because of an error.
+
+    reason -- reason of the failure
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_DAEMON_STATUS_ERROR,
+        "unable to get status of booth daemon: {reason}",
+        info={"reason": reason}
+    )
+
+
+def booth_tickets_status_error(reason=None):
+    """
+    Unable to get the status of booth tickets because of an error.
+
+    reason -- reason of the failure (kept in info only, not in the message)
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_TICKET_STATUS_ERROR,
+        "unable to get status of booth tickets",
+        info={
+            "reason": reason,
+        }
+    )
+
+
+def booth_peers_status_error(reason=None):
+    """
+    Unable to get the status of booth peers because of an error.
+
+    reason -- reason of the failure (kept in info only, not in the message)
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_PEERS_STATUS_ERROR,
+        "unable to get status of booth peers",
+        info={
+            "reason": reason,
+        }
+    )
+
+def booth_cannot_determine_local_site_ip():
+    """
+    Some booth operations are performed on a specific site and require its
+    ip. When the site specification is omitted, pcs tries to determine the
+    local site ip, and that attempt failed.
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP,
+        "cannot determine local site ip, please specify site parameter",
+        info={}
+    )
+
+def booth_ticket_operation_failed(operation, reason, site_ip, ticket_name):
+    """
+    Pcs uses external booth tools for some ticket operations (for example
+    grant and revoke), and the external command failed.
+    string operation -- what was to be performed with the ticket
+    string reason -- taken from the external booth command
+    string site_ip -- the site the command was run against
+    string ticket_name -- the ticket the command was run with
+    """
+    return ReportItem.error(
+        report_codes.BOOTH_TICKET_OPERATION_FAILED,
+        "unable to {operation} booth ticket '{ticket_name}' for site '{site_ip}', "
+            "reason: {reason}"
+        ,
+        info={
+            "operation": operation,
+            "reason": reason,
+            "site_ip": site_ip,
+            "ticket_name": ticket_name,
+        }
+    )
+
+def booth_skipping_config(config_file, reason):
+    """
+    Warning about skipping a booth config file.
+
+    config_file -- file name of the config being skipped
+    reason -- reason of the skip
+    """
+    return ReportItem.warning(
+        report_codes.BOOTH_SKIPPING_CONFIG,
+        "Skipping config file '{config_file}': {reason}",
+        info={
+            "config_file": config_file,
+            "reason": reason,
+        }
+    )
+
+def booth_cannot_identify_keyfile(severity=ReportItemSeverity.ERROR):
+    """
+    The authfile could not be identified in the booth configuration.
+    severity -- ERROR by default; only an ERROR is forceable
+    """
+    return ReportItem(
+        report_codes.BOOTH_CANNOT_IDENTIFY_KEYFILE,
+        severity,
+        "cannot identify authfile in booth configuration",
+        info={},
+        forceable=report_codes.FORCE_BOOTH_DESTROY
+            if severity == ReportItemSeverity.ERROR else None
+    )
diff --git a/pcs/lib/booth/resource.py b/pcs/lib/booth/resource.py
new file mode 100644
index 0000000..a4b7b1e
--- /dev/null
+++ b/pcs/lib/booth/resource.py
@@ -0,0 +1,97 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.lib.cib.tools import find_unique_id
+
+
+def create_resource_id(resources_section, name, suffix):
+    #unique cib id of the form "booth-<name>-<suffix>" (a numeric suffix is
+    #appended by find_unique_id when the id is already taken)
+    return find_unique_id(
+        resources_section.getroottree(), "booth-{0}-{1}".format(name, suffix)
+    )
+
+def get_creator(resource_create, resource_remove=None):
+    #TODO resource_create  is provisional hack until resources are not moved to
+    #lib
+    def create_booth_in_cluster(ip, booth_config_file_path, create_id):
+        #Create an IPaddr2 resource and a booth-site resource in one group;
+        #when the second create fails (the old resource code exits via
+        #SystemExit), the already created ip resource is removed again.
+        ip_id = create_id("ip")
+        booth_id = create_id("service")
+        group_id = create_id("group")
+
+        resource_create(
+            ra_id=ip_id,
+            ra_type="ocf:heartbeat:IPaddr2",
+            ra_values=["ip={0}".format(ip)],
+            op_values=[],
+            meta_values=[],
+            clone_opts=[],
+            group=group_id,
+        )
+        try:
+            resource_create(
+                ra_id=booth_id,
+                ra_type="ocf:pacemaker:booth-site",
+                ra_values=["config={0}".format(booth_config_file_path)],
+                op_values=[],
+                meta_values=[],
+                clone_opts=[],
+                group=group_id,
+            )
+        except SystemExit:
+            #NOTE(review): resource_remove defaults to None and would fail
+            #here if not supplied -- confirm callers always pass it
+            resource_remove(ip_id)
+    return create_booth_in_cluster
+
+def is_ip_resource(resource_element):
+    #True for primitives with resource agent type "IPaddr2"
+    return resource_element.attrib["type"] == "IPaddr2"
+
+def find_grouped_ip_element_to_remove(booth_element):
+    """
+    Return the ip resource element sharing a two-member group with
+    booth_element, or None when there is no such removable ip resource.
+    """
+    if booth_element.getparent().tag != "group":
+        return None
+
+    group = booth_element.getparent()
+    if len(group) != 2:
+        #when something else in group, ip is not for remove
+        return None
+    for element in group:
+        if is_ip_resource(element):
+            return element
+    return None
+
+def get_remover(resource_remove):
+    def remove_from_cluster(booth_element_list):
+        #remove each booth resource and, when it is alone in a group with
+        #an ip resource, that ip resource as well
+        for element in booth_element_list:
+            ip_resource_to_remove = find_grouped_ip_element_to_remove(element)
+            if ip_resource_to_remove is not None:
+                resource_remove(ip_resource_to_remove.attrib["id"])
+            resource_remove(element.attrib["id"])
+
+    return remove_from_cluster
+
+def find_for_config(resources_section, booth_config_file_path):
+    #all booth-site primitives whose "config" attribute points to the given
+    #booth config file
+    return resources_section.xpath(("""
+        .//primitive[
+            @type="booth-site"
+            and
+            instance_attributes[nvpair[@name="config" and @value="{0}"]]
+        ]
+    """).format(booth_config_file_path))
+
+def find_bound_ip(resources_section, booth_config_file_path):
+    #ip addresses of IPaddr2 resources grouped with a booth-site resource
+    #configured for the given booth config file
+    return resources_section.xpath(("""
+        .//group[
+            primitive[
+                @type="booth-site"
+                and
+                instance_attributes[
+                    nvpair[@name="config" and @value="{0}"]
+                ]
+            ]
+        ]
+        /primitive[@type="IPaddr2"]
+        /instance_attributes
+        /nvpair[@name="ip"]
+        /@value
+    """).format(booth_config_file_path))
diff --git a/pcs/lib/booth/status.py b/pcs/lib/booth/status.py
new file mode 100644
index 0000000..87cdc05
--- /dev/null
+++ b/pcs/lib/booth/status.py
@@ -0,0 +1,50 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs import settings
+from pcs.common.tools import join_multilines
+from pcs.lib.booth import reports
+from pcs.lib.errors import LibraryError
+
+
+def get_daemon_status(runner, name=None):
+    """
+    Return stdout of "booth status" run via runner.
+    name -- booth instance name, passed via "-c" when given
+    Raise LibraryError when the command fails.
+    """
+    cmd = [settings.booth_binary, "status"]
+    if name:
+        cmd += ["-c", name]
+    stdout, stderr, return_value = runner.run(cmd)
+    # 7 means that there is no booth instance running
+    if return_value not in [0, 7]:
+        raise LibraryError(
+            reports.booth_daemon_status_error(join_multilines([stderr, stdout]))
+        )
+    return stdout
+
+
+def get_tickets_status(runner, name=None):
+    """
+    Return stdout of "booth list" run via runner.
+    name -- booth instance name, passed via "-c" when given
+    Raise LibraryError when the command fails.
+    """
+    cmd = [settings.booth_binary, "list"]
+    if name:
+        cmd += ["-c", name]
+    stdout, stderr, return_value = runner.run(cmd)
+    if return_value != 0:
+        raise LibraryError(
+            reports.booth_tickets_status_error(
+                join_multilines([stderr, stdout])
+            )
+        )
+    return stdout
+
+
+def get_peers_status(runner, name=None):
+    """
+    Return stdout of "booth peers" run via runner.
+    name -- booth instance name, passed via "-c" when given
+    Raise LibraryError when the command fails.
+    """
+    cmd = [settings.booth_binary, "peers"]
+    if name:
+        cmd += ["-c", name]
+    stdout, stderr, return_value = runner.run(cmd)
+    if return_value != 0:
+        raise LibraryError(
+            reports.booth_peers_status_error(join_multilines([stderr, stdout]))
+        )
+    return stdout
diff --git a/pcs/lib/booth/sync.py b/pcs/lib/booth/sync.py
new file mode 100644
index 0000000..374b96d
--- /dev/null
+++ b/pcs/lib/booth/sync.py
@@ -0,0 +1,210 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os
+import json
+import base64
+
+from pcs.common import report_codes
+from pcs.lib import reports as lib_reports
+from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
+from pcs.lib.external import (
+    NodeCommunicator,
+    NodeCommunicationException,
+    node_communicator_exception_to_report_item,
+    parallel_nodes_communication_helper,
+)
+from pcs.lib.booth import (
+    config_files as booth_conf,
+    config_structure,
+    config_parser,
+    reports,
+)
+
+
+def _set_config_on_node(
+    communicator, reporter, node, name, config_data, authfile=None,
+    authfile_data=None
+):
+    """
+    Set booth config for instance 'name' on the specified node.
+
+    communicator -- NodeCommunicator
+    reporter -- report processor
+    node -- NodeAddresses
+    name -- name of booth instance
+    config_data -- booth config as string
+    authfile -- path to authfile; sent only together with authfile_data
+    authfile_data -- authfile content as bytes
+    """
+    data = {
+        "config": {
+            "name": "{0}.conf".format(name),
+            "data": config_data
+        }
+    }
+    if authfile is not None and authfile_data is not None:
+        data["authfile"] = {
+            "name": os.path.basename(authfile),
+            #binary content is base64 encoded for the json transport
+            "data": base64.b64encode(authfile_data).decode("utf-8")
+        }
+    communicator.call_node(
+        node,
+        "remote/booth_set_config",
+        NodeCommunicator.format_data_dict([("data_json", json.dumps(data))])
+    )
+    reporter.process(reports.booth_config_accepted_by_node(node.label, [name]))
+
+
+def send_config_to_all_nodes(
+    communicator, reporter, node_list, name, config_data, authfile=None,
+    authfile_data=None, skip_offline=False
+):
+    """
+    Send config_data of the specified booth instance from the local node to
+    all nodes in node_list (in parallel).
+
+    communicator -- NodeCommunicator
+    reporter -- report processor
+    node_list -- NodeAddressesList
+    name -- name of booth instance
+    config_data -- config content as string
+    authfile -- path to authfile
+    authfile_data -- content of authfile as bytes
+    skip_offline -- if True offline nodes will be skipped
+    """
+    reporter.process(reports.booth_config_distribution_started())
+    parallel_nodes_communication_helper(
+        _set_config_on_node,
+        [
+            (
+                [
+                    communicator, reporter, node, name, config_data,
+                    authfile, authfile_data
+                ],
+                {}
+            )
+            for node in node_list
+        ],
+        reporter,
+        skip_offline
+    )
+
+
+def send_all_config_to_node(
+    communicator,
+    reporter,
+    node,
+    rewrite_existing=False,
+    skip_wrong_config=False
+):
+    """
+    Send all booth configs from the default booth config directory and their
+    authfiles to the specified node.
+
+    communicator -- NodeCommunicator
+    reporter -- report processor
+    node -- NodeAddress
+    rewrite_existing -- if True rewrite existing files on the node
+    skip_wrong_config -- if True skip local configs that are unreadable
+    """
+    config_dict = booth_conf.read_configs(reporter, skip_wrong_config)
+    if not config_dict:
+        return
+
+    reporter.process(reports.booth_config_distribution_started())
+
+    file_list = []
+    for config, config_data in sorted(config_dict.items()):
+        try:
+            authfile_path = config_structure.get_authfile(
+                config_parser.parse(config_data)
+            )
+            file_list.append({
+                "name": config,
+                "data": config_data,
+                "is_authfile": False
+            })
+            if authfile_path:
+                content = booth_conf.read_authfile(reporter, authfile_path)
+                if not content:
+                    #unreadable or unsupported authfile, config is sent alone
+                    continue
+                file_list.append({
+                    "name": os.path.basename(authfile_path),
+                    "data": base64.b64encode(content).decode("utf-8"),
+                    "is_authfile": True
+                })
+        except LibraryError:
+            #best effort: skip unparsable configs instead of failing all
+            reporter.process(reports.booth_skipping_config(
+                config, "unable to parse config"
+            ))
+
+    data = [("data_json", json.dumps(file_list))]
+
+    if rewrite_existing:
+        data.append(("rewrite_existing", "1"))
+
+    try:
+        response = json.loads(communicator.call_node(
+            node,
+            "remote/booth_save_files",
+            NodeCommunicator.format_data_dict(data)
+        ))
+        report_list = []
+        #files that already exist on the node: error unless rewriting
+        for file in response["existing"]:
+            report_list.append(lib_reports.file_already_exists(
+                None,
+                file,
+                Severities.WARNING if rewrite_existing else Severities.ERROR,
+                (
+                    None if rewrite_existing
+                    else report_codes.FORCE_FILE_OVERWRITE
+                ),
+                node.label
+            ))
+        for file, reason in response["failed"].items():
+            report_list.append(reports.booth_config_distribution_node_error(
+                node.label, reason, file
+            ))
+        reporter.process_list(report_list)
+        reporter.process(
+            reports.booth_config_accepted_by_node(node.label, response["saved"])
+        )
+    except NodeCommunicationException as e:
+        raise LibraryError(node_communicator_exception_to_report_item(e))
+    except (KeyError, ValueError):
+        #response is missing expected keys or is not valid json
+        raise LibraryError(lib_reports.invalid_response_format(node.label))
+
+
+def pull_config_from_node(communicator, node, name):
+    """
+    Get config of specified booth instance and its authfile if there is one
+    from 'node'. It returns dictionary with format:
+    {
+        "config": {
+            "name": <file name of config>,
+            "data": <content of file>
+        },
+        "authfile": {
+            "name": <file name of authfile, None if it doesn't exist>,
+            "data": <base64 coded content of authfile>
+        }
+    }
+
+    communicator -- NodeCommunicator
+    node -- NodeAddresses
+    name -- name of booth instance
+    """
+    try:
+        return json.loads(communicator.call_node(
+            node,
+            "remote/booth_get_config",
+            NodeCommunicator.format_data_dict([("name", name)])
+        ))
+    except NodeCommunicationException as e:
+        raise LibraryError(node_communicator_exception_to_report_item(e))
+    except ValueError:
+        #response is not valid json
+        raise LibraryError(lib_reports.invalid_response_format(node.label))
diff --git a/pcs/lib/booth/test/__init__.py b/pcs/lib/booth/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/booth/test/test_config_exchange.py b/pcs/lib/booth/test/test_config_exchange.py
new file mode 100644
index 0000000..9717a96
--- /dev/null
+++ b/pcs/lib/booth/test/test_config_exchange.py
@@ -0,0 +1,58 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.lib.booth import config_structure, config_exchange
+
+
class FromExchangeFormatTest(TestCase):
    def test_convert_all_supported_items(self):
        """Every supported key kind survives conversion from exchange format."""
        exchange_data = [
            {"key": "authfile", "value": "/path/to/auth.file", "details": []},
            {"key": "site", "value": "1.1.1.1", "details": []},
            {"key": "site", "value": "2.2.2.2", "details": []},
            {"key": "arbitrator", "value": "3.3.3.3", "details": []},
            {"key": "ticket", "value": "TA", "details": []},
            {"key": "ticket", "value": "TB", "details": [
                {"key": "expire", "value": "10", "details": []}
            ]},
        ]
        expected_structure = [
            config_structure.ConfigItem("authfile", "/path/to/auth.file"),
            config_structure.ConfigItem("site", "1.1.1.1"),
            config_structure.ConfigItem("site", "2.2.2.2"),
            config_structure.ConfigItem("arbitrator", "3.3.3.3"),
            config_structure.ConfigItem("ticket", "TA"),
            config_structure.ConfigItem("ticket", "TB", [
                config_structure.ConfigItem("expire", "10")
            ]),
        ]
        self.assertEqual(
            expected_structure,
            config_exchange.from_exchange_format(exchange_data)
        )
+
+
# NOTE(review): class name misspells "Exchange" (and tests to_exchange_format,
# not a "get"); name kept unchanged here to avoid touching the interface.
class GetExchenageFormatTest(TestCase):
    def test_convert_parsed_config_to_exchange_format(self):
        """Parsed ConfigItems convert to the flat key/value/details format."""
        parsed_structure = [
            config_structure.ConfigItem("site", "1.1.1.1"),
            config_structure.ConfigItem("site", "2.2.2.2"),
            config_structure.ConfigItem("arbitrator", "3.3.3.3"),
            config_structure.ConfigItem("ticket", "TA"),
            config_structure.ConfigItem("ticket", "TB", [
                config_structure.ConfigItem("timeout", "10")
            ]),
        ]
        expected_exchange = [
            {"key": "site", "value": "1.1.1.1", "details": []},
            {"key": "site", "value": "2.2.2.2", "details": []},
            {"key": "arbitrator", "value": "3.3.3.3", "details": []},
            {"key": "ticket", "value": "TA", "details": []},
            {"key": "ticket", "value": "TB", "details": [
                {"key": "timeout", "value": "10", "details": []}
            ]},
        ]
        self.assertEqual(
            expected_exchange,
            config_exchange.to_exchange_format(parsed_structure)
        )
diff --git a/pcs/lib/booth/test/test_config_files.py b/pcs/lib/booth/test/test_config_files.py
new file mode 100644
index 0000000..d0df256
--- /dev/null
+++ b/pcs/lib/booth/test/test_config_files.py
@@ -0,0 +1,288 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os.path
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.common import report_codes, env_file_role_codes as file_roles
+from pcs.lib.booth import config_files
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.settings import booth_config_dir as BOOTH_CONFIG_DIR
+from pcs.test.tools.assertions import assert_raise_library_error, assert_report_item_list_equal
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.misc import create_patcher
+from pcs.test.tools.pcs_unittest import mock
+
+patch_config_files = create_patcher("pcs.lib.booth.config_files")
+
@mock.patch("os.path.isdir")
@mock.patch("os.listdir")
@mock.patch("os.path.isfile")
class GetAllConfigsFileNamesTest(TestCase):
    # Stacked patch decorators are applied bottom-up, so mocks arrive in the
    # order: isfile, listdir, isdir.
    def test_booth_config_dir_is_no_dir(
        self, mock_is_file, mock_listdir, mock_isdir
    ):
        """Return an empty list when the booth config dir does not exist."""
        mock_isdir.return_value = False
        self.assertEqual([], config_files.get_all_configs_file_names())
        mock_isdir.assert_called_once_with(BOOTH_CONFIG_DIR)
        # the directory check failed, so no listing or per-file checks happen
        self.assertEqual(0, mock_is_file.call_count)
        self.assertEqual(0, mock_listdir.call_count)

    def test_success(self, mock_is_file, mock_listdir, mock_isdir):
        """Only regular files whose name ends with '.conf' are reported."""
        def mock_is_file_fn(file_name):
            # entries simulating directories (not regular files)
            if file_name in [
                os.path.join(BOOTH_CONFIG_DIR, name)
                for name in ("dir.cong", "dir")
            ]:
                return False
            # entries simulating regular files, both matching and not
            elif file_name in [
                os.path.join(BOOTH_CONFIG_DIR, name)
                for name in (
                    "name1", "name2.conf", "name.conf.conf", ".conf",
                    "name3.conf"
                )
            ]:
                return True
            else:
                raise AssertionError("unexpected input")

        mock_isdir.return_value = True
        mock_is_file.side_effect = mock_is_file_fn
        mock_listdir.return_value = [
            "name1", "name2.conf", "name.conf.conf", ".conf", "name3.conf",
            "dir.cong", "dir"
        ]
        # ".conf" alone and non-".conf" names are filtered out
        self.assertEqual(
            ["name2.conf", "name.conf.conf", "name3.conf"],
            config_files.get_all_configs_file_names()
        )
        mock_listdir.assert_called_once_with(BOOTH_CONFIG_DIR)
+
+
class ReadConfigTest(TestCase):
    def test_success(self):
        """_read_config opens the named file under BOOTH_CONFIG_DIR as text."""
        self.maxDiff = None
        opener = mock.mock_open(read_data="config content")
        with patch_config_files("open", opener, create=True):
            content = config_files._read_config("my-file.conf")
        self.assertEqual("config content", content)

        expected_calls = [
            mock.call(os.path.join(BOOTH_CONFIG_DIR, "my-file.conf"), "r"),
            mock.call().__enter__(),
            mock.call().read(),
            mock.call().__exit__(None, None, None),
        ]
        self.assertEqual(expected_calls, opener.mock_calls)
+
+
@patch_config_files("_read_config")
@patch_config_files("get_all_configs_file_names")
class ReadConfigsTest(TestCase):
    # Stacked patch decorators are applied bottom-up: mock_get_configs patches
    # get_all_configs_file_names, mock_read patches _read_config.
    def setUp(self):
        self.mock_reporter = MockLibraryReportProcessor()

    def test_success(self, mock_get_configs, mock_read):
        """All configs readable: returns {file name: content} with no reports."""
        def _mock_read_cfg(file):
            if file == "name1.conf":
                return "config1"
            elif file == "name2.conf":
                return "config2"
            elif file == "name3.conf":
                return "config3"
            else:
                raise AssertionError("unexpected input: {0}".format(file))
        mock_get_configs.return_value = [
            "name1.conf", "name2.conf", "name3.conf"
        ]
        mock_read.side_effect = _mock_read_cfg

        self.assertEqual(
            {
                "name1.conf": "config1",
                "name2.conf": "config2",
                "name3.conf": "config3"
            },
            config_files.read_configs(self.mock_reporter)
        )

        mock_get_configs.assert_called_once_with()
        self.assertEqual(3, mock_read.call_count)
        mock_read.assert_has_calls([
            mock.call("name1.conf"),
            mock.call("name2.conf"),
            mock.call("name3.conf")
        ])
        self.assertEqual(0, len(self.mock_reporter.report_item_list))

    def test_skip_failed(self, mock_get_configs, mock_read):
        """With the skip flag set, unreadable configs produce only warnings."""
        def _mock_read_cfg(file):
            # name1 and name3 simulate unreadable files
            if file in ["name1.conf", "name3.conf"]:
                raise EnvironmentError()
            elif file == "name2.conf":
                return "config2"
            else:
                raise AssertionError("unexpected input: {0}".format(file))

        mock_get_configs.return_value = [
            "name1.conf", "name2.conf", "name3.conf"
        ]
        mock_read.side_effect = _mock_read_cfg

        # only the readable config is returned
        self.assertEqual(
            {"name2.conf": "config2"},
            config_files.read_configs(self.mock_reporter, True)
        )
        mock_get_configs.assert_called_once_with()
        self.assertEqual(3, mock_read.call_count)
        mock_read.assert_has_calls([
            mock.call("name1.conf"),
            mock.call("name2.conf"),
            mock.call("name3.conf")
        ])
        # one warning per unreadable file
        assert_report_item_list_equal(
            self.mock_reporter.report_item_list,
            [
                (
                    severities.WARNING,
                    report_codes.BOOTH_CONFIG_READ_ERROR,
                    {"name": "name1.conf"}
                ),
                (
                    severities.WARNING,
                    report_codes.BOOTH_CONFIG_READ_ERROR,
                    {"name": "name3.conf"}
                )
            ]
        )

    def test_do_not_skip_failed(self, mock_get_configs, mock_read):
        """Without the skip flag, unreadable configs raise forceable errors."""
        def _mock_read_cfg(file):
            # name1 and name3 simulate unreadable files
            if file in ["name1.conf", "name3.conf"]:
                raise EnvironmentError()
            elif file == "name2.conf":
                return "config2"
            else:
                raise AssertionError("unexpected input: {0}".format(file))

        mock_get_configs.return_value = [
            "name1.conf", "name2.conf", "name3.conf"
        ]
        mock_read.side_effect = _mock_read_cfg

        # errors carry SKIP_UNREADABLE_CONFIG as their force code
        assert_raise_library_error(
            lambda: config_files.read_configs(self.mock_reporter),
            (
                severities.ERROR,
                report_codes.BOOTH_CONFIG_READ_ERROR,
                {"name": "name1.conf"},
                report_codes.SKIP_UNREADABLE_CONFIG
            ),
            (
                severities.ERROR,
                report_codes.BOOTH_CONFIG_READ_ERROR,
                {"name": "name3.conf"},
                report_codes.SKIP_UNREADABLE_CONFIG
            )
        )
        mock_get_configs.assert_called_once_with()
        self.assertEqual(3, mock_read.call_count)
        mock_read.assert_has_calls([
            mock.call("name1.conf"),
            mock.call("name2.conf"),
            mock.call("name3.conf")
        ])
        self.assertEqual(2, len(self.mock_reporter.report_item_list))
+
+
class ReadAuthfileTest(TestCase):
    def setUp(self):
        self.mock_reporter = MockLibraryReportProcessor()
        self.maxDiff = None

    def test_success(self):
        """An authfile inside BOOTH_CONFIG_DIR is opened binary and returned."""
        path = os.path.join(BOOTH_CONFIG_DIR, "file.key")
        mock_open = mock.mock_open(read_data="key")

        with patch_config_files("open", mock_open, create=True):
            self.assertEqual(
                "key", config_files.read_authfile(self.mock_reporter, path)
            )

        # key files are opened in binary mode
        self.assertEqual(
            [
                mock.call(path, "rb"),
                mock.call().__enter__(),
                mock.call().read(),
                mock.call().__exit__(None, None, None)
            ],
            mock_open.mock_calls
        )
        self.assertEqual(0, len(self.mock_reporter.report_item_list))

    def test_path_none(self):
        """A None path is a no-op: returns None, produces no reports."""
        self.assertTrue(
            config_files.read_authfile(self.mock_reporter, None) is None
        )
        self.assertEqual(0, len(self.mock_reporter.report_item_list))

    def test_invalid_path(self):
        """A path outside the booth config dir is refused with a warning."""
        path = "/not/etc/booth/booth.key"
        self.assertTrue(
            config_files.read_authfile(self.mock_reporter, path) is None
        )
        assert_report_item_list_equal(
            self.mock_reporter.report_item_list,
            [(
                severities.WARNING,
                # "UNSUPORTED" spelling matches the constant as declared in
                # pcs.common.report_codes
                report_codes.BOOTH_UNSUPORTED_FILE_LOCATION,
                {"file": path}
            )]
        )

    def test_not_abs_path(self):
        """A non-normalized path (containing '..') is refused with a warning."""
        path = "/etc/booth/../booth.key"
        self.assertTrue(
            config_files.read_authfile(self.mock_reporter, path) is None
        )
        assert_report_item_list_equal(
            self.mock_reporter.report_item_list,
            [(
                severities.WARNING,
                report_codes.BOOTH_UNSUPORTED_FILE_LOCATION,
                {"file": path}
            )]
        )

    @patch_config_files("format_environment_error", return_value="reason")
    def test_read_failure(self, _):
        """An EnvironmentError while reading is reported as a FILE_IO_ERROR warning."""
        path = os.path.join(BOOTH_CONFIG_DIR, "file.key")
        mock_open = mock.mock_open()
        mock_open().read.side_effect = EnvironmentError()

        with patch_config_files("open", mock_open, create=True):
            return_value = config_files.read_authfile(self.mock_reporter, path)

        self.assertTrue(return_value is None)

        assert_report_item_list_equal(
            self.mock_reporter.report_item_list,
            [(
                severities.WARNING,
                report_codes.FILE_IO_ERROR,
                {
                    "file_role": file_roles.BOOTH_KEY,
                    "file_path": path,
                    "reason": "reason",
                    "operation": "read",
                }
            )]
        )
diff --git a/pcs/lib/booth/test/test_config_parser.py b/pcs/lib/booth/test/test_config_parser.py
new file mode 100644
index 0000000..c04f451
--- /dev/null
+++ b/pcs/lib/booth/test/test_config_parser.py
@@ -0,0 +1,171 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.common import report_codes
+from pcs.lib.booth import config_parser
+from pcs.lib.booth.config_structure import ConfigItem
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.pcs_unittest import TestCase
+
+
class BuildTest(TestCase):
    def test_build_file_content_from_parsed_structure(self):
        """config_parser.build renders ConfigItems as booth config file text."""
        structure = [
            ConfigItem("authfile", "/path/to/auth.file"),
            ConfigItem("site", "1.1.1.1"),
            ConfigItem("site", "2.2.2.2"),
            ConfigItem("arbitrator", "3.3.3.3"),
            ConfigItem("ticket", "TA"),
            ConfigItem("ticket", "TB", [
                ConfigItem("timeout", "10")
            ]),
        ]
        expected_lines = [
            "authfile = /path/to/auth.file",
            "site = 1.1.1.1",
            "site = 2.2.2.2",
            "arbitrator = 3.3.3.3",
            'ticket = "TA"',
            'ticket = "TB"',
            "  timeout = 10",
            "",  # newline at the end
        ]
        self.assertEqual(
            "\n".join(expected_lines),
            config_parser.build(structure)
        )
+
+
class OrganizeLinesTest(TestCase):
    def test_move_non_ticket_config_keys_above_tickets(self):
        """Global keys appearing after a ticket line are hoisted above tickets."""
        raw_lines = [
            ("site", "1.1.1.1"),
            ("ticket", "TA"),
            ("site", "2.2.2.2"),
            ("arbitrator", "3.3.3.3"),
        ]
        expected = [
            ConfigItem("site", "1.1.1.1"),
            ConfigItem("site", "2.2.2.2"),
            ConfigItem("arbitrator", "3.3.3.3"),
            ConfigItem("ticket", "TA"),
        ]
        self.assertEqual(expected, config_parser.organize_lines(raw_lines))

    def test_use_ticket_key_as_ticket_detail(self):
        """Non-global keys after a ticket become that ticket's details."""
        self.maxDiff = None
        raw_lines = [
            ("site", "1.1.1.1"),
            ("expire", "300"),  # out of ticket content is kept global
            ("ticket", "TA"),
            ("site", "2.2.2.2"),  # move to global
            ("timeout", "10"),
            ("--nonexistent", "value"),  # no global is kept under ticket
            ("expire", "300"),
            ("ticket", "TB"),
            ("arbitrator", "3.3.3.3"),
            ("timeout", "20"),
            ("renewal-freq", "40"),
        ]
        expected = [
            ConfigItem("site", "1.1.1.1"),
            ConfigItem("expire", "300"),
            ConfigItem("site", "2.2.2.2"),
            ConfigItem("arbitrator", "3.3.3.3"),
            ConfigItem("ticket", "TA", [
                ConfigItem("timeout", "10"),
                ConfigItem("--nonexistent", "value"),
                ConfigItem("expire", "300"),
            ]),
            ConfigItem("ticket", "TB", [
                ConfigItem("timeout", "20"),
                ConfigItem("renewal-freq", "40"),
            ]),
        ]
        self.assertEqual(expected, config_parser.organize_lines(raw_lines))
+
+
class ParseRawLinesTest(TestCase):
    def test_parse_simple_correct_lines(self):
        """Whitespace around '=' is tolerated; '#' inside a value is literal."""
        content = "\n".join([
            "site = 1.1.1.1",
            " site  =  2.2.2.2 ",
            "arbitrator=3.3.3.3",
            "syntactically_correct = nonsense",
            "line-with = hash#literal",
            "",
        ])
        self.assertEqual(
            [
                ("site", "1.1.1.1"),
                ("site", "2.2.2.2"),
                ("arbitrator", "3.3.3.3"),
                ("syntactically_correct", "nonsense"),
                ("line-with", "hash#literal"),
            ],
            config_parser.parse_to_raw_lines(content)
        )

    def test_parse_lines_with_whole_line_comment(self):
        """A line whose first non-blank character is '#' is ignored."""
        content = "\n".join([
            " # some comment",
            "site = 1.1.1.1",
        ])
        self.assertEqual(
            [("site", "1.1.1.1")],
            config_parser.parse_to_raw_lines(content)
        )

    def test_skip_empty_lines(self):
        """Blank and whitespace-only lines are skipped."""
        content = "\n".join([
            " ",
            "site = 1.1.1.1",
        ])
        self.assertEqual(
            [("site", "1.1.1.1")],
            config_parser.parse_to_raw_lines(content)
        )

    def test_raises_when_unexpected_lines_appear(self):
        """All syntactically invalid lines are collected into InvalidLines."""
        invalid_line_list = [
            "first invalid line",
            "second = 'invalid line' something else #comment",
            "third = 'invalid line 'something#'#",
        ]
        content = "\n".join(["site = 1.1.1.1"] + invalid_line_list)
        with self.assertRaises(config_parser.InvalidLines) as context_manager:
            config_parser.parse_to_raw_lines(content)
        self.assertEqual(invalid_line_list, context_manager.exception.args[0])

    def test_parse_lines_finishing_with_comment(self):
        """A trailing comment after a quoted value is stripped."""
        self.assertEqual(
            [("site", "1.1.1.1")],
            config_parser.parse_to_raw_lines("site = '1.1.1.1' #comment")
        )
+
class ParseTest(TestCase):
    def test_raises_when_invalid_lines_appear(self):
        """parse raises one LibraryError listing every invalid line."""
        invalid_line_list = [
            "first invalid line",
            "second = 'invalid line' something else #comment"
        ]
        line_list = ["site = 1.1.1.1"] + invalid_line_list
        assert_raise_library_error(
            lambda: config_parser.parse("\n".join(line_list)),
            (
                severities.ERROR,
                report_codes.BOOTH_CONFIG_UNEXPECTED_LINES,
                {
                    "line_list": invalid_line_list,
                },
            ),
        )

    # Renamed from "test_do_not_raises_when_no_invalid_liens_there": fixes the
    # "liens" typo and the grammar of the original name.
    def test_does_not_raise_when_no_invalid_lines_there(self):
        """A well-formed config parses without raising."""
        config_parser.parse("site = 1.1.1.1")
diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py
new file mode 100644
index 0000000..40618b2
--- /dev/null
+++ b/pcs/lib/booth/test/test_config_structure.py
@@ -0,0 +1,369 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.common import report_codes
+from pcs.lib.booth import config_structure
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_report_item_list_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_unittest import mock
+
+
class ValidateTicketExistsTest(TestCase):
    # Renamed from "test_raises_on_duplicate_ticket": the scenario checks a
    # *missing* ticket (BOOTH_TICKET_DOES_NOT_EXIST), not a duplicate one;
    # the old name was apparently copied from ValidateTicketUniqueTest.
    def test_raises_when_ticket_does_not_exist(self):
        """validate_ticket_exists fails when the ticket is not in the config."""
        assert_raise_library_error(
            lambda: config_structure.validate_ticket_exists(
                [config_structure.ConfigItem("ticket", "B")], "A"
            ),
            (
                severities.ERROR,
                report_codes.BOOTH_TICKET_DOES_NOT_EXIST,
                {
                    "ticket_name": "A",
                },
            ),
        )
+
class ValidateTicketUniqueTest(TestCase):
    def test_raises_on_duplicate_ticket(self):
        """A ticket name already present in the configuration is rejected."""
        existing_items = [config_structure.ConfigItem("ticket", "A")]
        assert_raise_library_error(
            lambda: config_structure.validate_ticket_unique(
                existing_items, "A"
            ),
            (
                severities.ERROR,
                report_codes.BOOTH_TICKET_DUPLICATE,
                {
                    "ticket_name": "A",
                },
            ),
        )

    def test_do_not_raises_when_no_duplicated_ticket(self):
        """An empty configuration cannot contain a duplicate."""
        config_structure.validate_ticket_unique([], "A")
+
class ValidateTicketOptionsTest(TestCase):
    def test_raises_on_invalid_options(self):
        """Each bad option yields its own report; all raise in one LibraryError."""
        report_processor = MockLibraryReportProcessor()
        expected_errors = [
            # "site" and "port" are global keys, not ticket keys
            (
                severities.ERROR,
                report_codes.INVALID_OPTION,
                {
                    "option_name": "site",
                    "option_type": "booth ticket",
                    "allowed": list(config_structure.TICKET_KEYS),
                },
            ),
            (
                severities.ERROR,
                report_codes.INVALID_OPTION,
                {
                    "option_name": "port",
                    "option_type": "booth ticket",
                    "allowed": list(config_structure.TICKET_KEYS),
                },
            ),
            # a known key with a blank value is a value error
            (
                severities.ERROR,
                report_codes.INVALID_OPTION_VALUE,
                {
                    "option_name": "timeout",
                    "option_value": " ",
                    "allowed_values": "no-empty",
                },
            ),
            # an unknown key is forceable via FORCE_OPTIONS
            (
                severities.ERROR,
                report_codes.INVALID_OPTION,
                {
                    "option_name": "unknown",
                    "option_type": "booth ticket",
                    "allowed": list(config_structure.TICKET_KEYS),
                },
                report_codes.FORCE_OPTIONS
            ),
        ]
        assert_raise_library_error(
            lambda: config_structure.validate_ticket_options(
                report_processor,
                {
                    "site": "a",
                    "port": "b",
                    "timeout": " ",
                    "unknown": "c",
                },
                allow_unknown_options=False,
            ),
            *expected_errors
        )
        assert_report_item_list_equal(
            report_processor.report_item_list,
            expected_errors
        )

    def test_unknown_options_are_forceable(self):
        """With allow_unknown_options=True an unknown key degrades to a warning."""
        report_processor = MockLibraryReportProcessor()
        expected_errors = [
            (
                severities.ERROR,
                report_codes.INVALID_OPTION,
                {
                    "option_name": "site",
                    "option_type": "booth ticket",
                    "allowed": list(config_structure.TICKET_KEYS),
                },
            ),
        ]
        assert_raise_library_error(
            lambda: config_structure.validate_ticket_options(
                report_processor, {
                    "site": "a",
                    "unknown": "c",
                },
                allow_unknown_options=True,
            ),
            *expected_errors
        )
        # "unknown" still gets reported, but only as a warning
        assert_report_item_list_equal(
            report_processor.report_item_list,
            expected_errors + [
                (
                    severities.WARNING,
                    report_codes.INVALID_OPTION,
                    {
                        "option_name": "unknown",
                        "option_type": "booth ticket",
                        "allowed": list(config_structure.TICKET_KEYS),
                    },
                ),
            ]
        )

    def test_success_on_valid_options(self):
        """A valid ticket option passes with no reports at all."""
        report_processor = MockLibraryReportProcessor()
        config_structure.validate_ticket_options(
            report_processor,
            {"timeout": "10"},
            allow_unknown_options=False,
        )
        assert_report_item_list_equal(report_processor.report_item_list, [])
+
class TicketExistsTest(TestCase):
    def test_returns_true_if_ticket_in_structure(self):
        """ticket_exists finds a ticket that is present."""
        self.assertTrue(config_structure.ticket_exists(
            [config_structure.ConfigItem("ticket", "A")], "A"
        ))

    # Renamed from "test_returns_false_if_ticket_in_structure": the checked
    # ticket "B" is *not* in the structure, so the old name duplicated the
    # positive case's description and was misleading.
    def test_returns_false_if_ticket_not_in_structure(self):
        """ticket_exists reports absence of a ticket that is not present."""
        self.assertFalse(config_structure.ticket_exists(
            [config_structure.ConfigItem("ticket", "A")], "B"
        ))
+
class ValidateTicketNameTest(TestCase):
    def test_accept_valid_ticket_name(self):
        """An alphabetic name passes validation without raising."""
        config_structure.validate_ticket_name("abc")

    def test_refuse_bad_ticket_name(self):
        """A name with a disallowed character is reported as invalid."""
        expected_report = (
            severities.ERROR,
            report_codes.BOOTH_TICKET_NAME_INVALID,
            {
                "ticket_name": "@ticket",
            },
        )
        assert_raise_library_error(
            lambda: config_structure.validate_ticket_name("@ticket"),
            expected_report,
        )
+
class ValidatePeersTest(TestCase):
    def test_do_no_raises_on_correct_args(self):
        """Two sites and one arbitrator form a valid peer layout."""
        config_structure.validate_peers(
            site_list=["1.1.1.1", "2.2.2.2"],
            arbitrator_list=["3.3.3.3"]
        )

    def test_refuse_less_than_2_sites(self):
        """Fewer than two sites is reported even when the peer count is odd."""
        expected_report = (
            severities.ERROR,
            report_codes.BOOTH_LACK_OF_SITES,
            {
                "sites": ["1.1.1.1"],
            }
        )
        assert_raise_library_error(
            lambda: config_structure.validate_peers(
                site_list=["1.1.1.1"],
                arbitrator_list=["3.3.3.3", "4.4.4.4"]
            ),
            expected_report,
        )

    def test_refuse_even_number_peers(self):
        """An even total number of peers is rejected."""
        expected_report = (
            severities.ERROR,
            report_codes.BOOTH_EVEN_PEERS_NUM,
            {
                "number": 2,
            }
        )
        assert_raise_library_error(
            lambda: config_structure.validate_peers(
                site_list=["1.1.1.1", "2.2.2.2"],
                arbitrator_list=[]
            ),
            expected_report,
        )

    def test_refuse_address_duplication(self):
        """An address used more than once is reported as a duplication."""
        expected_report = (
            severities.ERROR,
            report_codes.BOOTH_ADDRESS_DUPLICATION,
            {
                "addresses": set(["1.1.1.1"]),
            }
        )
        assert_raise_library_error(
            lambda: config_structure.validate_peers(
                site_list=["1.1.1.1", "1.1.1.1", "1.1.1.1"],
                arbitrator_list=["3.3.3.3", "4.4.4.4"]
            ),
            expected_report,
        )

    def test_refuse_problem_combination(self):
        """All detected problems are reported from a single call."""
        expected_reports = [
            (
                severities.ERROR,
                report_codes.BOOTH_LACK_OF_SITES,
                {
                    "sites": ["1.1.1.1"],
                }
            ),
            (
                severities.ERROR,
                report_codes.BOOTH_EVEN_PEERS_NUM,
                {
                    "number": 2,
                }
            ),
            (
                severities.ERROR,
                report_codes.BOOTH_ADDRESS_DUPLICATION,
                {
                    "addresses": set(["1.1.1.1"]),
                }
            ),
        ]
        assert_raise_library_error(
            lambda: config_structure.validate_peers(
                site_list=["1.1.1.1"],
                arbitrator_list=["1.1.1.1"]
            ),
            *expected_reports
        )
+
class RemoveTicketTest(TestCase):
    @mock.patch("pcs.lib.booth.config_structure.validate_ticket_exists")
    def test_successfully_remove_ticket(self, mock_validate_ticket_exists):
        """remove_ticket drops the named ticket and validates its existence."""
        kept_item = config_structure.ConfigItem("ticket", "some-ticket")
        removed_item = config_structure.ConfigItem("ticket", "deprecated-ticket")
        configuration = [kept_item, removed_item]

        result = config_structure.remove_ticket(
            configuration, "deprecated-ticket"
        )

        self.assertEqual([kept_item], result)
        mock_validate_ticket_exists.assert_called_once_with(
            configuration,
            "deprecated-ticket"
        )
+
class AddTicketTest(TestCase):
    # Stacked patch decorators are applied bottom-up, so mocks arrive in the
    # order: validate_ticket_name, validate_ticket_unique,
    # validate_ticket_options.
    @mock.patch("pcs.lib.booth.config_structure.validate_ticket_options")
    @mock.patch("pcs.lib.booth.config_structure.validate_ticket_unique")
    @mock.patch("pcs.lib.booth.config_structure.validate_ticket_name")
    def test_successfully_add_ticket(
        self, mock_validate_name, mock_validate_uniq, mock_validate_options
    ):
        """add_ticket appends the new ticket and runs all three validations."""
        configuration = [
            config_structure.ConfigItem("ticket", "some-ticket"),
        ]

        self.assertEqual(
            config_structure.add_ticket(
                None, configuration,
                "new-ticket",
                {
                    "timeout": "10",
                },
                allow_unknown_options=False,
            ),
            [
                config_structure.ConfigItem("ticket", "some-ticket"),
                config_structure.ConfigItem("ticket", "new-ticket", [
                    config_structure.ConfigItem("timeout", "10"),
                ]),
            ],
        )

        mock_validate_name.assert_called_once_with("new-ticket")
        mock_validate_uniq.assert_called_once_with(configuration, "new-ticket")
        # allow_unknown_options is forwarded positionally as False
        mock_validate_options.assert_called_once_with(
            None,
            {"timeout": "10"},
            False
        )
+
class SetAuthfileTest(TestCase):
    def test_add_authfile(self):
        """set_authfile prepends an authfile entry when none is present."""
        result = config_structure.set_authfile(
            [
                config_structure.ConfigItem("site", "1.1.1.1"),
            ],
            "/path/to/auth.file"
        )
        self.assertEqual(
            [
                config_structure.ConfigItem("authfile", "/path/to/auth.file"),
                config_structure.ConfigItem("site", "1.1.1.1"),
            ],
            result
        )

    def test_reset_authfile(self):
        """set_authfile replaces all existing authfile entries with one."""
        result = config_structure.set_authfile(
            [
                config_structure.ConfigItem("site", "1.1.1.1"),
                config_structure.ConfigItem("authfile", "/old/path/to/auth1.file"),
                config_structure.ConfigItem("authfile", "/old/path/to/auth2.file"),
            ],
            "/path/to/auth.file"
        )
        self.assertEqual(
            [
                config_structure.ConfigItem("authfile", "/path/to/auth.file"),
                config_structure.ConfigItem("site", "1.1.1.1"),
            ],
            result
        )
+
class TakePeersTest(TestCase):
    def test_returns_site_list_and_arbitrators_list(self):
        """take_peers splits the config into (sites, arbitrators) address lists."""
        booth_configuration = [
            config_structure.ConfigItem("site", "1.1.1.1"),
            config_structure.ConfigItem("site", "2.2.2.2"),
            config_structure.ConfigItem("site", "3.3.3.3"),
            config_structure.ConfigItem("arbitrator", "4.4.4.4"),
            config_structure.ConfigItem("arbitrator", "5.5.5.5"),
        ]
        site_list, arbitrator_list = config_structure.take_peers(
            booth_configuration,
        )
        self.assertEqual(["1.1.1.1", "2.2.2.2", "3.3.3.3"], site_list)
        self.assertEqual(["4.4.4.4", "5.5.5.5"], arbitrator_list)
diff --git a/pcs/lib/booth/test/test_env.py b/pcs/lib/booth/test/test_env.py
new file mode 100644
index 0000000..993d709
--- /dev/null
+++ b/pcs/lib/booth/test/test_env.py
@@ -0,0 +1,225 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import grp
+import os
+import pwd
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs import settings
+from pcs.common import report_codes
+from pcs.lib.booth import env
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.misc import get_test_resource as rc, create_patcher
+from pcs.test.tools.pcs_unittest import mock
+
+patch_env = create_patcher("pcs.lib.booth.env")
+
+class GetConfigFileNameTest(TestCase):
+    """Tests for env.get_config_file_name."""
+    @patch_env("os.path.exists")
+    def test_refuse_when_name_starts_with_slash(self, mock_path_exists):
+        # A "/" in the booth instance name must be rejected — presumably
+        # because the name becomes part of a file path (confirm in env).
+        mock_path_exists.return_value = True
+        assert_raise_library_error(
+            lambda: env.get_config_file_name("/booth"),
+            (
+                severities.ERROR,
+                report_codes.BOOTH_INVALID_NAME,
+                {
+                    "name": "/booth",
+                    "reason": "contains illegal character '/'",
+                }
+            ),
+        )
+
+class BoothEnvTest(TestCase):
+    """Tests for env.BoothEnv with the RealFile backend patched out."""
+    @patch_env("RealFile")
+    def test_get_content_from_file(self, mock_real_file):
+        # get_config_content() should simply return RealFile.read().
+        mock_real_file.return_value = mock.MagicMock(
+            read=mock.MagicMock(return_value="content")
+        )
+        self.assertEqual(
+            "content",
+            env.BoothEnv("report processor", env_data={"name": "booth"})
+                .get_config_content()
+        )
+
+    @patch_env("set_keyfile_access")
+    @patch_env("RealFile")
+    def test_create_config(self, mock_real_file, mock_set_keyfile_access):
+        # create_config must first check for conflicts (honouring the
+        # overwrite flag) and only then write the content.
+        mock_file = mock.MagicMock(
+            assert_no_conflict_with_existing=mock.MagicMock(),
+            write=mock.MagicMock(),
+        )
+        mock_real_file.return_value = mock_file
+
+
+        env.BoothEnv(
+            "report processor",
+            env_data={"name": "booth"}
+        ).create_config("a", can_overwrite_existing=True)
+
+        self.assertEqual(mock_file.assert_no_conflict_with_existing.mock_calls,[
+            mock.call('report processor', True),
+        ])
+        self.assertEqual(mock_file.write.mock_calls, [mock.call('a')])
+
+    @patch_env("RealFile")
+    def test_push_config(self, mock_real_file):
+        # push_config writes unconditionally (no conflict check expected).
+        mock_file = mock.MagicMock(
+            assert_no_conflict_with_existing=mock.MagicMock(),
+            write=mock.MagicMock(),
+        )
+        mock_real_file.return_value = mock_file
+        env.BoothEnv(
+            "report processor",
+            env_data={"name": "booth"}
+        ).push_config("a")
+        mock_file.write.assert_called_once_with("a")
+
+
+
+    def test_export_config_file_when_was_present_in_env_data(self):
+        # When the CLI supplied file contents via env_data, export() must
+        # hand them back with default file flags.
+        self.assertEqual(
+            env.BoothEnv(
+                "report processor",
+                {
+                    "name": "booth-name",
+                    "config_file": {
+                        "content": "a\nb",
+                    },
+                    "key_file": {
+                        "content": "secure",
+                    },
+                    "key_path": "/path/to/file.key",
+                }
+            ).export(),
+            {
+                "config_file": {
+                    "content": "a\nb",
+                    "can_overwrite_existing_file": False,
+                    "no_existing_file_expected": False,
+                    "is_binary": False,
+                },
+                "key_file": {
+                    "content": "secure",
+                    "can_overwrite_existing_file": False,
+                    "no_existing_file_expected": False,
+                    "is_binary": False,
+                },
+            }
+        )
+
+    def test_do_not_export_config_file_when_no_provided(self):
+        self.assertEqual(
+            env.BoothEnv("report processor", {"name": "booth"}).export(),
+            {}
+        )
+
+class SetKeyfileAccessTest(TestCase):
+    """Tests for env.set_keyfile_access (owner, group and mode of the key).
+
+    The first test touches the real filesystem and may require running as
+    a user/group differing from pacemaker's to be meaningful.
+    """
+    def test_set_desired_file_access(self):
+        #setup
+        file_path = rc("temp-keyfile")
+        if os.path.exists(file_path):
+            os.remove(file_path)
+        with open(file_path, "w") as file:
+            file.write("content")
+
+        #check assumptions
+        # The fresh file must NOT already have the desired mode/owner/group,
+        # otherwise the assertions after the call would prove nothing.
+        stat = os.stat(file_path)
+        self.assertNotEqual('600', oct(stat.st_mode)[-3:])
+        current_user = pwd.getpwuid(os.getuid())[0]
+        if current_user != settings.pacemaker_uname:
+            file_user = pwd.getpwuid(stat.st_uid)[0]
+            self.assertNotEqual(file_user, settings.pacemaker_uname)
+        current_group = grp.getgrgid(os.getgid())[0]
+        if current_group != settings.pacemaker_gname:
+            file_group = grp.getgrgid(stat.st_gid)[0]
+            self.assertNotEqual(file_group, settings.pacemaker_gname)
+
+        #run tested method
+        env.set_keyfile_access(file_path)
+
+        #check
+        stat = os.stat(file_path)
+        self.assertEqual('600', oct(stat.st_mode)[-3:])
+
+        file_user = pwd.getpwuid(stat.st_uid)[0]
+        self.assertEqual(file_user, settings.pacemaker_uname)
+
+        file_group = grp.getgrgid(stat.st_gid)[0]
+        self.assertEqual(file_group, settings.pacemaker_gname)
+
+    @patch_env("pwd.getpwnam", mock.MagicMock(side_effect=KeyError))
+    @patch_env("settings.pacemaker_uname", "some-user")
+    def test_raises_when_cannot_get_uid(self):
+        # An unknown pacemaker user name must surface as a library error.
+        assert_raise_library_error(
+            lambda: env.set_keyfile_access("/booth"),
+            (
+                severities.ERROR,
+                report_codes.UNABLE_TO_DETERMINE_USER_UID,
+                {
+                    "user": "some-user",
+                }
+            ),
+        )
+
+    @patch_env("grp.getgrnam", mock.MagicMock(side_effect=KeyError))
+    @patch_env("pwd.getpwnam", mock.MagicMock())
+    @patch_env("settings.pacemaker_gname", "some-group")
+    def test_raises_when_cannot_get_gid(self):
+        # An unknown pacemaker group name must surface as a library error.
+        assert_raise_library_error(
+            lambda: env.set_keyfile_access("/booth"),
+            (
+                severities.ERROR,
+                report_codes.UNABLE_TO_DETERMINE_GROUP_GID,
+                {
+                    "group": "some-group",
+                }
+            ),
+        )
+
+    @patch_env("format_environment_error", mock.Mock(return_value="err"))
+    @patch_env("os.chown", mock.MagicMock(side_effect=EnvironmentError()))
+    @patch_env("grp.getgrnam", mock.MagicMock())
+    @patch_env("pwd.getpwnam", mock.MagicMock())
+    @patch_env("settings.pacemaker_gname", "some-group")
+    def test_raises_when_cannot_chown(self):
+        # A failing chown is reported as FILE_IO_ERROR with operation=chown.
+        assert_raise_library_error(
+            lambda: env.set_keyfile_access("/booth"),
+            (
+                severities.ERROR,
+                report_codes.FILE_IO_ERROR,
+                {
+                    'reason': 'err',
+                    'file_role': u'BOOTH_KEY',
+                    'file_path': '/booth',
+                    'operation': u'chown',
+                }
+            ),
+        )
+
+    @patch_env("format_environment_error", mock.Mock(return_value="err"))
+    @patch_env("os.chmod", mock.MagicMock(side_effect=EnvironmentError()))
+    @patch_env("os.chown", mock.MagicMock())
+    @patch_env("grp.getgrnam", mock.MagicMock())
+    @patch_env("pwd.getpwnam", mock.MagicMock())
+    @patch_env("settings.pacemaker_gname", "some-group")
+    def test_raises_when_cannot_chmod(self):
+        # A failing chmod is reported as FILE_IO_ERROR with operation=chmod.
+        assert_raise_library_error(
+            lambda: env.set_keyfile_access("/booth"),
+            (
+                severities.ERROR,
+                report_codes.FILE_IO_ERROR,
+                {
+                    'reason': 'err',
+                    'file_role': u'BOOTH_KEY',
+                    'file_path': '/booth',
+                    'operation': u'chmod',
+                }
+            ),
+        )
diff --git a/pcs/lib/booth/test/test_resource.py b/pcs/lib/booth/test/test_resource.py
new file mode 100644
index 0000000..8971438
--- /dev/null
+++ b/pcs/lib/booth/test/test_resource.py
@@ -0,0 +1,190 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+from lxml import etree
+
+import pcs.lib.booth.resource as booth_resource
+from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.misc import get_test_resource as rc
+
+
+def fixture_resources_with_booth(booth_config_file_path):
+    """Return a <resources> tree holding one booth-site primitive whose
+    "config" attribute points at booth_config_file_path."""
+    return etree.fromstring('''
+        <resources>
+            <primitive type="booth-site">
+                <instance_attributes>
+                    <nvpair name="config" value="{0}"/>
+                </instance_attributes>
+            </primitive>
+        </resources>
+    '''.format(booth_config_file_path))
+
+def fixture_booth_element(id, booth_config_file_path):
+    """Return a booth-site primitive element with the given id and config
+    path. (The parameter name shadows the builtin ``id``; kept as-is since
+    all callers pass it positionally.)"""
+    return etree.fromstring('''
+        <primitive id="{0}" type="booth-site">
+            <instance_attributes>
+                <nvpair name="config" value="{1}"/>
+            </instance_attributes>
+        </primitive>
+    '''.format(id, booth_config_file_path))
+
+def fixture_ip_element(id, ip=""):
+    """Return an IPaddr2 primitive element with the given id and an "ip"
+    instance attribute set to ``ip``."""
+    return etree.fromstring('''
+        <primitive id="{0}" type="IPaddr2">
+            <instance_attributes id="{0}-ia">
+            <nvpair
+                id="booth-booth-{0}-ia-ip"
+                name="ip"
+                value="{1}"
+            />
+          </instance_attributes>
+        </primitive>
+    '''.format(id, ip))
+
+class CreateResourceIdTest(TestCase):
+    @mock.patch("pcs.lib.booth.resource.find_unique_id")
+    def test_return_new_uinq_id(self, mock_find_unique_id):
+        resources_section = etree.fromstring('''<resources/>''')
+        mock_find_unique_id.side_effect = (
+            lambda resources_section, id: "{0}-n".format(id)
+        )
+        self.assertEqual(
+            "booth-some-name-ip-n",
+            booth_resource.create_resource_id(
+                resources_section, "some-name", "ip"
+            )
+        )
+
+class FindBoothResourceElementsTest(TestCase):
+    """Tests for booth_resource.find_for_config."""
+    def test_returns_empty_list_when_no_matching_booth_element(self):
+        self.assertEqual([], booth_resource.find_for_config(
+            fixture_resources_with_booth("/ANOTHER/PATH/TO/CONF"),
+            "/PATH/TO/CONF"
+        ))
+
+
+    def test_returns_all_found_resource_elements(self):
+        # Only booth elements configured with the searched path are
+        # returned, in document order; "second" has a different path.
+        resources = etree.fromstring('<resources/>')
+        first = fixture_booth_element("first", "/PATH/TO/CONF")
+        second = fixture_booth_element("second", "/ANOTHER/PATH/TO/CONF")
+        third = fixture_booth_element("third", "/PATH/TO/CONF")
+        for element in [first, second,third]:
+            resources.append(element)
+
+        self.assertEqual(
+            [first, third],
+            booth_resource.find_for_config(
+                resources,
+                "/PATH/TO/CONF"
+            )
+        )
+
+class RemoveFromClusterTest(TestCase):
+    """Tests for booth_resource.get_remover."""
+    def call(self, element_list):
+        # Run the remover over element_list and return the mock so the
+        # test can inspect which resource ids were removed (and in what
+        # order).
+        mock_resource_remove = mock.Mock()
+        booth_resource.get_remover(mock_resource_remove)(element_list)
+        return mock_resource_remove
+
+    def test_remove_ip_when_is_only_booth_sibling_in_group(self):
+        # Only the booth element is passed in; the sibling ip resource is
+        # expected to be removed as well since booth is its only peer.
+        group = etree.fromstring('''
+            <group>
+                <primitive id="ip" type="IPaddr2"/>
+                <primitive id="booth" type="booth-site">
+                    <instance_attributes>
+                        <nvpair name="config" value="/PATH/TO/CONF"/>
+                    </instance_attributes>
+                </primitive>
+            </group>
+        ''')
+
+        mock_resource_remove = self.call(group.getchildren()[1:])
+        self.assertEqual(
+            mock_resource_remove.mock_calls, [
+                mock.call('ip'),
+                mock.call('booth'),
+            ]
+        )
+
+class CreateInClusterTest(TestCase):
+    """Tests for booth_resource.get_creator."""
+    def test_remove_ip_when_booth_resource_add_failed(self):
+        # resource_create succeeds for the ip resource, then raises
+        # SystemExit for the booth resource; the creator must roll back by
+        # removing the already created ip resource.
+        mock_resource_create = mock.Mock(side_effect=[None, SystemExit(1)])
+        mock_resource_remove = mock.Mock()
+        mock_create_id = mock.Mock(side_effect=["ip_id","booth_id","group_id"])
+        ip = "1.2.3.4"
+        # NOTE(review): rc() resolves test-resource paths; passing an
+        # absolute path relies on os.path.join returning it unchanged —
+        # confirm this is intentional.
+        booth_config_file_path = rc("/path/to/booth.conf")
+
+        booth_resource.get_creator(mock_resource_create, mock_resource_remove)(
+            ip,
+            booth_config_file_path,
+            mock_create_id
+        )
+        self.assertEqual(mock_resource_create.mock_calls, [
+            mock.call(
+                clone_opts=[],
+                group=u'group_id',
+                meta_values=[],
+                op_values=[],
+                ra_id=u'ip_id',
+                ra_type=u'ocf:heartbeat:IPaddr2',
+                ra_values=[u'ip=1.2.3.4'],
+            ),
+            mock.call(
+                clone_opts=[],
+                group='group_id',
+                meta_values=[],
+                op_values=[],
+                ra_id='booth_id',
+                ra_type='ocf:pacemaker:booth-site',
+                ra_values=['config=/path/to/booth.conf'],
+            )
+        ])
+        mock_resource_remove.assert_called_once_with("ip_id")
+
+
+class FindBindedIpTest(TestCase):
+    def fixture_resource_section(self, ip_element_list):
+        resources_section = etree.fromstring('<resources/>')
+        group = etree.SubElement(resources_section, "group")
+        group.append(fixture_booth_element("booth1", "/PATH/TO/CONF"))
+        for ip_element in ip_element_list:
+            group.append(ip_element)
+        return resources_section
+
+
+    def test_returns_None_when_no_ip(self):
+        self.assertEqual(
+            [],
+            booth_resource.find_bound_ip(
+                self.fixture_resource_section([]),
+                "/PATH/TO/CONF",
+            )
+        )
+
+    def test_returns_ip_when_correctly_found(self):
+        self.assertEqual(
+            ["192.168.122.31"],
+            booth_resource.find_bound_ip(
+                self.fixture_resource_section([
+                    fixture_ip_element("ip1", "192.168.122.31"),
+                ]),
+                "/PATH/TO/CONF",
+            )
+        )
+
+    def test_returns_None_when_more_ip(self):
+        self.assertEqual(
+            ["192.168.122.31", "192.168.122.32"],
+            booth_resource.find_bound_ip(
+                self.fixture_resource_section([
+                    fixture_ip_element("ip1", "192.168.122.31"),
+                    fixture_ip_element("ip2", "192.168.122.32"),
+                ]),
+                "/PATH/TO/CONF",
+            )
+        )
diff --git a/pcs/lib/booth/test/test_status.py b/pcs/lib/booth/test/test_status.py
new file mode 100644
index 0000000..dfb7354
--- /dev/null
+++ b/pcs/lib/booth/test/test_status.py
@@ -0,0 +1,137 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+try:
+    # python 2
+    #pylint: disable=unused-import
+    from urlparse import parse_qs as url_decode
+except ImportError:
+    # python 3
+    from urllib.parse import parse_qs as url_decode
+
+from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.assertions import assert_raise_library_error
+
+from pcs import settings
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as Severities
+from pcs.lib.external import CommandRunner
+import pcs.lib.booth.status as lib
+
+
+class GetDaemonStatusTest(TestCase):
+    """Tests for lib.get_daemon_status (wraps "booth status")."""
+    def setUp(self):
+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+
+    def test_no_name(self):
+        self.mock_run.run.return_value = ("output", "", 0)
+        self.assertEqual("output", lib.get_daemon_status(self.mock_run))
+        self.mock_run.run.assert_called_once_with(
+            [settings.booth_binary, "status"]
+        )
+
+    def test_with_name(self):
+        # A booth instance name is passed through as "-c <name>".
+        self.mock_run.run.return_value = ("output", "", 0)
+        self.assertEqual("output", lib.get_daemon_status(self.mock_run, "name"))
+        self.mock_run.run.assert_called_once_with(
+            [settings.booth_binary, "status", "-c", "name"]
+        )
+
+    def test_daemon_not_running(self):
+        # Exit code 7 is treated as "daemon not running" and yields empty
+        # output rather than an error.
+        self.mock_run.run.return_value = ("", "error", 7)
+        self.assertEqual("", lib.get_daemon_status(self.mock_run))
+        self.mock_run.run.assert_called_once_with(
+            [settings.booth_binary, "status"]
+        )
+
+    def test_failure(self):
+        # Any other nonzero exit code raises with stderr+stdout joined.
+        self.mock_run.run.return_value = ("out", "error", 1)
+        assert_raise_library_error(
+            lambda: lib.get_daemon_status(self.mock_run),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_DAEMON_STATUS_ERROR,
+                {"reason": "error\nout"}
+            )
+        )
+        self.mock_run.run.assert_called_once_with(
+            [settings.booth_binary, "status"]
+        )
+
+
+class GetTicketsStatusTest(TestCase):
+    """Tests for lib.get_tickets_status (wraps "booth list")."""
+    def setUp(self):
+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+
+    def test_no_name(self):
+        self.mock_run.run.return_value = ("output", "", 0)
+        self.assertEqual("output", lib.get_tickets_status(self.mock_run))
+        self.mock_run.run.assert_called_once_with(
+            [settings.booth_binary, "list"]
+        )
+
+    def test_with_name(self):
+        # A booth instance name is passed through as "-c <name>".
+        self.mock_run.run.return_value = ("output", "", 0)
+        self.assertEqual(
+            "output", lib.get_tickets_status(self.mock_run, "name")
+        )
+        self.mock_run.run.assert_called_once_with(
+            [settings.booth_binary, "list", "-c", "name"]
+        )
+
+    def test_failure(self):
+        # Nonzero exit raises with stderr+stdout joined as the reason.
+        self.mock_run.run.return_value = ("out", "error", 1)
+        assert_raise_library_error(
+            lambda: lib.get_tickets_status(self.mock_run),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_TICKET_STATUS_ERROR,
+                {
+                    "reason": "error\nout"
+                }
+            )
+        )
+        self.mock_run.run.assert_called_once_with(
+            [settings.booth_binary, "list"]
+        )
+
+
+class GetPeersStatusTest(TestCase):
+    """Tests for lib.get_peers_status (wraps "booth peers")."""
+    def setUp(self):
+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+
+    def test_no_name(self):
+        self.mock_run.run.return_value = ("output", "", 0)
+        self.assertEqual("output", lib.get_peers_status(self.mock_run))
+        self.mock_run.run.assert_called_once_with(
+            [settings.booth_binary, "peers"]
+        )
+
+    def test_with_name(self):
+        # A booth instance name is passed through as "-c <name>".
+        self.mock_run.run.return_value = ("output", "", 0)
+        self.assertEqual("output", lib.get_peers_status(self.mock_run, "name"))
+        self.mock_run.run.assert_called_once_with(
+            [settings.booth_binary, "peers", "-c", "name"]
+        )
+
+    def test_failure(self):
+        # Nonzero exit raises with stderr+stdout joined as the reason.
+        self.mock_run.run.return_value = ("out", "error", 1)
+        assert_raise_library_error(
+            lambda: lib.get_peers_status(self.mock_run),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_PEERS_STATUS_ERROR,
+                {
+                    "reason": "error\nout"
+                }
+            )
+        )
+        self.mock_run.run.assert_called_once_with(
+            [settings.booth_binary, "peers"]
+        )
diff --git a/pcs/lib/booth/test/test_sync.py b/pcs/lib/booth/test/test_sync.py
new file mode 100644
index 0000000..701b086
--- /dev/null
+++ b/pcs/lib/booth/test/test_sync.py
@@ -0,0 +1,1215 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+import json
+import base64
+try:
+    # python 2
+    from urlparse import parse_qs as url_decode
+except ImportError:
+    # python 3
+    from urllib.parse import parse_qs as url_decode
+
+from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.assertions import (
+    assert_report_item_list_equal,
+    assert_raise_library_error,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+
+from pcs.common import report_codes
+from pcs.lib.node import NodeAddresses, NodeAddressesList
+from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
+from pcs.lib.external import NodeCommunicator, NodeConnectionException
+import pcs.lib.booth.sync as lib
+
+
+def to_b64(string):
+    return base64.b64encode(string.encode("utf-8")).decode("utf-8")
+
+
+class SetConfigOnNodeTest(TestCase):
+    """Tests for lib._set_config_on_node (remote/booth_set_config request)."""
+    def setUp(self):
+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        self.mock_rep = MockLibraryReportProcessor()
+        self.node = NodeAddresses("node")
+
+    def test_with_authfile(self):
+        # With an authfile, the request payload carries both the config
+        # (basename + ".conf") and the base64-encoded key (basename only).
+        lib._set_config_on_node(
+            self.mock_com,
+            self.mock_rep,
+            self.node,
+            "cfg_name",
+            "cfg",
+            authfile="/abs/path/my-key.key",
+            authfile_data="test key".encode("utf-8")
+        )
+        self.assertEqual(1, self.mock_com.call_node.call_count)
+        self.assertEqual(self.node, self.mock_com.call_node.call_args[0][0])
+        self.assertEqual(
+            "remote/booth_set_config", self.mock_com.call_node.call_args[0][1]
+        )
+        data = url_decode(self.mock_com.call_node.call_args[0][2])
+        self.assertTrue("data_json" in data)
+        self.assertEqual(
+            {
+                "config": {
+                    "name": "cfg_name.conf",
+                    "data": "cfg"
+                },
+                "authfile": {
+                    "name": "my-key.key",
+                    "data": to_b64("test key")
+                }
+            },
+            json.loads(data["data_json"][0])
+        )
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+                {
+                    "node": self.node.label,
+                    "name": "cfg_name",
+                    "name_list": ["cfg_name"]
+                }
+            )]
+        )
+
+    def _assert(self):
+        # Shared assertions for all no-authfile variants: the payload must
+        # contain only the config part and a success report is expected.
+        self.assertEqual(1, self.mock_com.call_node.call_count)
+        self.assertEqual(self.node, self.mock_com.call_node.call_args[0][0])
+        self.assertEqual(
+            "remote/booth_set_config", self.mock_com.call_node.call_args[0][1]
+        )
+        data = url_decode(self.mock_com.call_node.call_args[0][2])
+        self.assertTrue("data_json" in data)
+        self.assertEqual(
+            {
+                "config": {
+                    "name": "cfg_name.conf",
+                    "data": "cfg"
+                }
+            },
+            json.loads(data["data_json"][0])
+        )
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+                {
+                    "node": self.node.label,
+                    "name": "cfg_name",
+                    "name_list": ["cfg_name"]
+                }
+            )]
+        )
+
+    def test_authfile_data_None(self):
+        # authfile path without data: the authfile part must be omitted.
+        lib._set_config_on_node(
+            self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg",
+            authfile="key.key"
+        )
+        self._assert()
+
+    def test_authfile_only_data(self):
+        # authfile data without a path: the authfile part must be omitted.
+        lib._set_config_on_node(
+            self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg",
+            authfile_data="key".encode("utf-8")
+        )
+        self._assert()
+
+    def test_without_authfile(self):
+        lib._set_config_on_node(
+            self.mock_com, self.mock_rep, self.node, "cfg_name", "cfg"
+        )
+        self._assert()
+
+
+ at mock.patch("pcs.lib.booth.sync.parallel_nodes_communication_helper")
+class SyncConfigInCluster(TestCase):
+    def setUp(self):
+        self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.node_list = NodeAddressesList(
+            [NodeAddresses("node" + str(i) for i in range(5))]
+        )
+
+    def test_without_authfile(self, mock_parallel):
+        lib.send_config_to_all_nodes(
+            self.mock_communicator,
+            self.mock_reporter,
+            self.node_list,
+            "cfg_name",
+            "config data"
+        )
+        mock_parallel.assert_called_once_with(
+            lib._set_config_on_node,
+            [
+                (
+                    [
+                        self.mock_communicator,
+                        self.mock_reporter,
+                        node,
+                        "cfg_name",
+                        "config data",
+                        None,
+                        None
+                    ],
+                    {}
+                )
+                for node in self.node_list
+            ],
+            self.mock_reporter,
+            False
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
+                {}
+            )]
+        )
+
+    def test_skip_offline(self, mock_parallel):
+        lib.send_config_to_all_nodes(
+            self.mock_communicator,
+            self.mock_reporter,
+            self.node_list,
+            "cfg_name",
+            "config data",
+            skip_offline=True
+        )
+        mock_parallel.assert_called_once_with(
+            lib._set_config_on_node,
+            [
+                (
+                    [
+                        self.mock_communicator,
+                        self.mock_reporter,
+                        node,
+                        "cfg_name",
+                        "config data",
+                        None,
+                        None
+                    ],
+                    {}
+                )
+                for node in self.node_list
+                ],
+            self.mock_reporter,
+            True
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
+                {}
+            )]
+        )
+
+    def test_with_authfile(self, mock_parallel):
+        lib.send_config_to_all_nodes(
+            self.mock_communicator,
+            self.mock_reporter,
+            self.node_list,
+            "cfg_name",
+            "config data",
+            authfile="/my/auth/file.key",
+            authfile_data="authfile data".encode("utf-8")
+        )
+        mock_parallel.assert_called_once_with(
+            lib._set_config_on_node,
+            [
+                (
+                    [
+                        self.mock_communicator,
+                        self.mock_reporter,
+                        node,
+                        "cfg_name",
+                        "config data",
+                        "/my/auth/file.key",
+                        "authfile data".encode("utf-8")
+                    ],
+                    {}
+                )
+                for node in self.node_list
+                ],
+            self.mock_reporter,
+            False
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
+                {}
+            )]
+        )
+
+
+@mock.patch("pcs.lib.booth.config_structure.get_authfile")
+@mock.patch("pcs.lib.booth.config_parser.parse")
+@mock.patch("pcs.lib.booth.config_files.read_configs")
+@mock.patch("pcs.lib.booth.config_files.read_authfile")
+class SendAllConfigToNodeTest(TestCase):
+    def setUp(self):
+        self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.node = NodeAddresses("node")
+
+    @staticmethod
+    def mock_parse_fn(config_content):
+        if config_content not in ["config1", "config2"]:
+            raise AssertionError(
+                "unexpected input {0}".format(config_content)
+            )
+        return config_content
+
+    @staticmethod
+    def mock_authfile_fn(parsed_config):
+        _data = {
+            "config1": "/path/to/file1.key",
+            "config2": "/path/to/file2.key"
+        }
+        if parsed_config not in _data:
+            raise AssertionError(
+                "unexpected input {0}".format(parsed_config)
+            )
+        return _data[parsed_config]
+
+    @staticmethod
+    def mock_read_authfile_fn(_, authfile_path):
+        _data = {
+            "/path/to/file1.key": "some key".encode("utf-8"),
+            "/path/to/file2.key": "another key".encode("utf-8"),
+        }
+        if authfile_path not in _data:
+            raise AssertionError(
+                "unexpected input {0}".format(authfile_path)
+            )
+        return _data[authfile_path]
+
+    def test_success(
+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+    ):
+        mock_parse.side_effect = self.mock_parse_fn
+        mock_authfile.side_effect = self.mock_authfile_fn
+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
+        mock_read_configs.return_value = {
+            "name1.conf": "config1",
+            "name2.conf": "config2"
+        }
+        self.mock_communicator.call_node.return_value = """
+        {
+            "existing": [],
+            "failed": {},
+            "saved": ["name1.conf", "file1.key", "name2.conf", "file2.key"]
+        }
+        """
+        lib.send_all_config_to_node(
+            self.mock_communicator, self.mock_reporter, self.node
+        )
+        self.assertEqual(2, mock_parse.call_count)
+        mock_parse.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_authfile.call_count)
+        mock_authfile.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_read_authfile.call_count)
+        mock_read_authfile.assert_has_calls([
+            mock.call(self.mock_reporter, "/path/to/file1.key"),
+            mock.call(self.mock_reporter, "/path/to/file2.key")
+        ])
+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
+        self.assertEqual(
+            self.node, self.mock_communicator.call_node.call_args[0][0]
+        )
+        self.assertEqual(
+            "remote/booth_save_files",
+            self.mock_communicator.call_node.call_args[0][1]
+        )
+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
+        self.assertFalse("rewrite_existing" in data)
+        self.assertTrue("data_json" in data)
+        self.assertEqual(
+            [
+                {
+                    "name": "name1.conf",
+                    "data": "config1",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file1.key",
+                    "data": to_b64("some key"),
+                    "is_authfile": True
+                },
+                {
+                    "name": "name2.conf",
+                    "data": "config2",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file2.key",
+                    "data": to_b64("another key"),
+                    "is_authfile": True
+                }
+            ],
+            json.loads(data["data_json"][0])
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
+                    {}
+                ),
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+                    {
+                        "node": self.node.label,
+                        "name": "name1.conf, file1.key, name2.conf, file2.key",
+                        "name_list": [
+                            "name1.conf", "file1.key", "name2.conf", "file2.key"
+                        ]
+                    }
+                )
+            ]
+        )
+
+    def test_do_not_rewrite_existing(
+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+    ):
+        mock_parse.side_effect = self.mock_parse_fn
+        mock_authfile.side_effect = self.mock_authfile_fn
+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
+        mock_read_configs.return_value = {
+            "name1.conf": "config1",
+            "name2.conf": "config2"
+        }
+        self.mock_communicator.call_node.return_value = """
+        {
+            "existing": ["name1.conf", "file1.key"],
+            "failed": {},
+            "saved": ["name2.conf", "file2.key"]
+        }
+        """
+        assert_raise_library_error(
+            lambda: lib.send_all_config_to_node(
+                self.mock_communicator, self.mock_reporter, self.node
+            ),
+            (
+                Severities.ERROR,
+                report_codes.FILE_ALREADY_EXISTS,
+                {
+                    "file_role": None,
+                    "file_path": "name1.conf",
+                    "node": self.node.label
+                },
+                report_codes.FORCE_FILE_OVERWRITE
+            ),
+            (
+                Severities.ERROR,
+                report_codes.FILE_ALREADY_EXISTS,
+                {
+                    "file_role": None,
+                    "file_path": "file1.key",
+                    "node": self.node.label
+                },
+                report_codes.FORCE_FILE_OVERWRITE
+            )
+        )
+        self.assertEqual(2, mock_parse.call_count)
+        mock_parse.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_authfile.call_count)
+        mock_authfile.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_read_authfile.call_count)
+        mock_read_authfile.assert_has_calls([
+            mock.call(self.mock_reporter, "/path/to/file1.key"),
+            mock.call(self.mock_reporter, "/path/to/file2.key")
+        ])
+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
+        self.assertEqual(
+            self.node, self.mock_communicator.call_node.call_args[0][0]
+        )
+        self.assertEqual(
+            "remote/booth_save_files",
+            self.mock_communicator.call_node.call_args[0][1]
+        )
+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
+        self.assertFalse("rewrite_existing" in data)
+        self.assertTrue("data_json" in data)
+        self.assertEqual(
+            [
+                {
+                    "name": "name1.conf",
+                    "data": "config1",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file1.key",
+                    "data": to_b64("some key"),
+                    "is_authfile": True
+                },
+                {
+                    "name": "name2.conf",
+                    "data": "config2",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file2.key",
+                    "data": to_b64("another key"),
+                    "is_authfile": True
+                }
+            ],
+            json.loads(data["data_json"][0])
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
+                    {}
+                ),
+                (
+                    Severities.ERROR,
+                    report_codes.FILE_ALREADY_EXISTS,
+                    {
+                        "file_role": None,
+                        "file_path": "name1.conf",
+                        "node": self.node.label
+                    },
+                    report_codes.FORCE_FILE_OVERWRITE
+                ),
+                (
+                    Severities.ERROR,
+                    report_codes.FILE_ALREADY_EXISTS,
+                    {
+                        "file_role": None,
+                        "file_path": "file1.key",
+                        "node": self.node.label
+                    },
+                    report_codes.FORCE_FILE_OVERWRITE
+                )
+            ]
+        )
+
+    def test_rewrite_existing(
+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+    ):
+        mock_parse.side_effect = self.mock_parse_fn
+        mock_authfile.side_effect = self.mock_authfile_fn
+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
+        mock_read_configs.return_value = {
+            "name1.conf": "config1",
+            "name2.conf": "config2"
+        }
+        self.mock_communicator.call_node.return_value = """
+        {
+            "existing": ["name1.conf", "file1.key"],
+            "failed": {},
+            "saved": ["name2.conf", "file2.key"]
+        }
+        """
+        lib.send_all_config_to_node(
+            self.mock_communicator,
+            self.mock_reporter,
+            self.node,
+            rewrite_existing=True
+        )
+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
+        self.assertEqual(2, mock_parse.call_count)
+        mock_parse.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_authfile.call_count)
+        mock_authfile.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_read_authfile.call_count)
+        mock_read_authfile.assert_has_calls([
+            mock.call(self.mock_reporter, "/path/to/file1.key"),
+            mock.call(self.mock_reporter, "/path/to/file2.key")
+        ])
+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
+        self.assertEqual(
+            self.node, self.mock_communicator.call_node.call_args[0][0]
+        )
+        self.assertEqual(
+            "remote/booth_save_files",
+            self.mock_communicator.call_node.call_args[0][1]
+        )
+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
+        self.assertTrue("rewrite_existing" in data)
+        self.assertTrue("data_json" in data)
+        self.assertEqual(
+            [
+                {
+                    "name": "name1.conf",
+                    "data": "config1",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file1.key",
+                    "data": to_b64("some key"),
+                    "is_authfile": True
+                },
+                {
+                    "name": "name2.conf",
+                    "data": "config2",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file2.key",
+                    "data": to_b64("another key"),
+                    "is_authfile": True
+                }
+            ],
+            json.loads(data["data_json"][0])
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
+                    {}
+                ),
+                (
+                    Severities.WARNING,
+                    report_codes.FILE_ALREADY_EXISTS,
+                    {
+                        "file_role": None,
+                        "file_path": "name1.conf",
+                        "node": self.node.label
+                    }
+                ),
+                (
+                    Severities.WARNING,
+                    report_codes.FILE_ALREADY_EXISTS,
+                    {
+                        "file_role": None,
+                        "file_path": "file1.key",
+                        "node": self.node.label
+                    }
+                ),
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+                    {
+                        "node": self.node.label,
+                        "name": "name2.conf, file2.key",
+                        "name_list": ["name2.conf", "file2.key"]
+                    }
+                )
+            ]
+        )
+
+    def test_write_failure(
+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+    ):
+        mock_parse.side_effect = self.mock_parse_fn
+        mock_authfile.side_effect = self.mock_authfile_fn
+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
+        mock_read_configs.return_value = {
+            "name1.conf": "config1",
+            "name2.conf": "config2"
+        }
+        self.mock_communicator.call_node.return_value = """
+        {
+            "existing": [],
+            "failed": {
+                "name1.conf": "Error message",
+                "file1.key": "Another error message"
+            },
+            "saved": ["name2.conf", "file2.key"]
+        }
+        """
+        assert_raise_library_error(
+            lambda: lib.send_all_config_to_node(
+                self.mock_communicator, self.mock_reporter, self.node
+            ),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR,
+                {
+                    "node": self.node.label,
+                    "name": "name1.conf",
+                    "reason": "Error message"
+                }
+            ),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR,
+                {
+                    "node": self.node.label,
+                    "name": "file1.key",
+                    "reason": "Another error message"
+                }
+            )
+        )
+        self.assertEqual(2, mock_parse.call_count)
+        mock_parse.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_authfile.call_count)
+        mock_authfile.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_read_authfile.call_count)
+        mock_read_authfile.assert_has_calls([
+            mock.call(self.mock_reporter, "/path/to/file1.key"),
+            mock.call(self.mock_reporter, "/path/to/file2.key")
+        ])
+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
+        self.assertEqual(
+            self.node, self.mock_communicator.call_node.call_args[0][0]
+        )
+        self.assertEqual(
+            "remote/booth_save_files",
+            self.mock_communicator.call_node.call_args[0][1]
+        )
+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
+        self.assertFalse("rewrite_existing" in data)
+        self.assertTrue("data_json" in data)
+        self.assertEqual(
+            [
+                {
+                    "name": "name1.conf",
+                    "data": "config1",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file1.key",
+                    "data": to_b64("some key"),
+                    "is_authfile": True
+                },
+                {
+                    "name": "name2.conf",
+                    "data": "config2",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file2.key",
+                    "data": to_b64("another key"),
+                    "is_authfile": True
+                }
+            ],
+            json.loads(data["data_json"][0])
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
+                    {}
+                ),
+                (
+                    Severities.ERROR,
+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR,
+                    {
+                        "node": self.node.label,
+                        "name": "name1.conf",
+                        "reason": "Error message"
+                    }
+                ),
+                (
+                    Severities.ERROR,
+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_NODE_ERROR,
+                    {
+                        "node": self.node.label,
+                        "name": "file1.key",
+                        "reason": "Another error message"
+                    }
+                )
+            ]
+        )
+
+    def test_communication_failure(
+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+    ):
+        mock_parse.side_effect = self.mock_parse_fn
+        mock_authfile.side_effect = self.mock_authfile_fn
+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
+        mock_read_configs.return_value = {
+            "name1.conf": "config1",
+            "name2.conf": "config2"
+        }
+        self.mock_communicator.call_node.side_effect = NodeConnectionException(
+            self.node.label, "command", "reason"
+        )
+        assert_raise_library_error(
+            lambda: lib.send_all_config_to_node(
+                self.mock_communicator, self.mock_reporter, self.node
+            ),
+            (
+                Severities.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+                {
+                    "node": self.node.label,
+                    "command": "command",
+                    "reason": "reason"
+                }
+            )
+        )
+        self.assertEqual(2, mock_parse.call_count)
+        mock_parse.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_authfile.call_count)
+        mock_authfile.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_read_authfile.call_count)
+        mock_read_authfile.assert_has_calls([
+            mock.call(self.mock_reporter, "/path/to/file1.key"),
+            mock.call(self.mock_reporter, "/path/to/file2.key")
+        ])
+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
+        self.assertEqual(
+            self.node, self.mock_communicator.call_node.call_args[0][0]
+        )
+        self.assertEqual(
+            "remote/booth_save_files",
+            self.mock_communicator.call_node.call_args[0][1]
+        )
+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
+        self.assertFalse("rewrite_existing" in data)
+        self.assertTrue("data_json" in data)
+        self.assertEqual(
+            [
+                {
+                    "name": "name1.conf",
+                    "data": "config1",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file1.key",
+                    "data": to_b64("some key"),
+                    "is_authfile": True
+                },
+                {
+                    "name": "name2.conf",
+                    "data": "config2",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file2.key",
+                    "data": to_b64("another key"),
+                    "is_authfile": True
+                }
+            ],
+            json.loads(data["data_json"][0])
+        )
+
+    def test_wrong_response_format(
+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+    ):
+        mock_parse.side_effect = self.mock_parse_fn
+        mock_authfile.side_effect = self.mock_authfile_fn
+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
+        mock_read_configs.return_value = {
+            "name1.conf": "config1",
+            "name2.conf": "config2"
+        }
+        self.mock_communicator.call_node.return_value = """
+            {
+                "existing_files": [],
+                "failed": {
+                    "name1.conf": "Error message",
+                    "file1.key": "Another error message"
+                },
+                "saved": ["name2.conf", "file2.key"]
+            }
+        """
+        assert_raise_library_error(
+            lambda: lib.send_all_config_to_node(
+                self.mock_communicator, self.mock_reporter, self.node
+            ),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESPONSE_FORMAT,
+                {"node": self.node.label}
+            )
+        )
+        self.assertEqual(2, mock_parse.call_count)
+        mock_parse.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_authfile.call_count)
+        mock_authfile.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_read_authfile.call_count)
+        mock_read_authfile.assert_has_calls([
+            mock.call(self.mock_reporter, "/path/to/file1.key"),
+            mock.call(self.mock_reporter, "/path/to/file2.key")
+        ])
+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
+        self.assertEqual(
+            self.node, self.mock_communicator.call_node.call_args[0][0]
+        )
+        self.assertEqual(
+            "remote/booth_save_files",
+            self.mock_communicator.call_node.call_args[0][1]
+        )
+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
+        self.assertFalse("rewrite_existing" in data)
+        self.assertTrue("data_json" in data)
+        self.assertEqual(
+            [
+                {
+                    "name": "name1.conf",
+                    "data": "config1",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file1.key",
+                    "data": to_b64("some key"),
+                    "is_authfile": True
+                },
+                {
+                    "name": "name2.conf",
+                    "data": "config2",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file2.key",
+                    "data": to_b64("another key"),
+                    "is_authfile": True
+                }
+            ],
+            json.loads(data["data_json"][0])
+        )
+
+    def test_response_not_json(
+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+    ):
+        mock_parse.side_effect = self.mock_parse_fn
+        mock_authfile.side_effect = self.mock_authfile_fn
+        mock_read_authfile.side_effect = self.mock_read_authfile_fn
+        mock_read_configs.return_value = {
+            "name1.conf": "config1",
+            "name2.conf": "config2"
+        }
+        self.mock_communicator.call_node.return_value = "not json"
+        assert_raise_library_error(
+            lambda: lib.send_all_config_to_node(
+                self.mock_communicator, self.mock_reporter, self.node
+            ),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESPONSE_FORMAT,
+                {"node": self.node.label}
+            )
+        )
+        self.assertEqual(2, mock_parse.call_count)
+        mock_parse.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_authfile.call_count)
+        mock_authfile.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_read_authfile.call_count)
+        mock_read_authfile.assert_has_calls([
+            mock.call(self.mock_reporter, "/path/to/file1.key"),
+            mock.call(self.mock_reporter, "/path/to/file2.key")
+        ])
+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
+        self.assertEqual(
+            self.node, self.mock_communicator.call_node.call_args[0][0]
+        )
+        self.assertEqual(
+            "remote/booth_save_files",
+            self.mock_communicator.call_node.call_args[0][1]
+        )
+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
+        self.assertFalse("rewrite_existing" in data)
+        self.assertTrue("data_json" in data)
+        self.assertEqual(
+            [
+                {
+                    "name": "name1.conf",
+                    "data": "config1",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file1.key",
+                    "data": to_b64("some key"),
+                    "is_authfile": True
+                },
+                {
+                    "name": "name2.conf",
+                    "data": "config2",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file2.key",
+                    "data": to_b64("another key"),
+                    "is_authfile": True
+                }
+            ],
+            json.loads(data["data_json"][0])
+        )
+
+
+    def test_configs_without_authfiles(
+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+    ):
+        def mock_authfile_fn(parsed_config):
+            if parsed_config == "config1":
+                return None
+            elif parsed_config == "config2":
+                return "/path/to/file2.key"
+            else:
+                raise AssertionError(
+                    "unexpected input: {0}".format(parsed_config)
+                )
+
+        mock_parse.side_effect = self.mock_parse_fn
+        mock_authfile.side_effect = mock_authfile_fn
+        mock_read_authfile.return_value = "another key".encode("utf-8")
+        mock_read_configs.return_value = {
+            "name1.conf": "config1",
+            "name2.conf": "config2"
+        }
+        self.mock_communicator.call_node.return_value = """
+        {
+            "existing": [],
+            "failed": {},
+            "saved": ["name1.conf", "name2.conf", "file2.key"]
+        }
+        """
+        lib.send_all_config_to_node(
+            self.mock_communicator, self.mock_reporter, self.node
+        )
+        self.assertEqual(2, mock_parse.call_count)
+        mock_parse.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        self.assertEqual(2, mock_authfile.call_count)
+        mock_authfile.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        mock_read_authfile.assert_called_once_with(
+            self.mock_reporter, "/path/to/file2.key"
+        )
+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
+        self.assertEqual(
+            self.node, self.mock_communicator.call_node.call_args[0][0]
+        )
+        self.assertEqual(
+            "remote/booth_save_files",
+            self.mock_communicator.call_node.call_args[0][1]
+        )
+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
+        self.assertFalse("rewrite_existing" in data)
+        self.assertTrue("data_json" in data)
+        self.assertEqual(
+            [
+                {
+                    "name": "name1.conf",
+                    "data": "config1",
+                    "is_authfile": False
+                },
+                {
+                    "name": "name2.conf",
+                    "data": "config2",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file2.key",
+                    "data": to_b64("another key"),
+                    "is_authfile": True
+                }
+            ],
+            json.loads(data["data_json"][0])
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
+                    {}
+                ),
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+                    {
+                        "node": self.node.label,
+                        "name": "name1.conf, name2.conf, file2.key",
+                        "name_list": ["name1.conf", "name2.conf", "file2.key"]
+                    }
+                )
+            ]
+        )
+
+    def test_unable_to_parse_config(
+        self, mock_read_authfile, mock_read_configs, mock_parse, mock_authfile
+    ):
+        def mock_parse_fn(config_data):
+            if config_data == "config1":
+                raise LibraryError()
+            elif config_data == "config2":
+                return "config2"
+            else:
+                raise AssertionError(
+                    "unexpected input: {0}".format(config_data)
+                )
+
+        mock_parse.side_effect = mock_parse_fn
+        mock_authfile.return_value = "/path/to/file2.key"
+        mock_read_authfile.return_value = "another key".encode("utf-8")
+        mock_read_configs.return_value = {
+            "name1.conf": "config1",
+            "name2.conf": "config2"
+        }
+        self.mock_communicator.call_node.return_value = """
+         {
+             "existing": [],
+             "failed": {},
+             "saved": ["name2.conf", "file2.key"]
+         }
+         """
+        lib.send_all_config_to_node(
+            self.mock_communicator, self.mock_reporter, self.node
+        )
+        self.assertEqual(2, mock_parse.call_count)
+        mock_parse.assert_has_calls([
+            mock.call("config1"), mock.call("config2")
+        ])
+        mock_authfile.assert_called_once_with("config2")
+        mock_read_authfile.assert_called_once_with(
+            self.mock_reporter, "/path/to/file2.key"
+        )
+        mock_read_configs.assert_called_once_with(self.mock_reporter, False)
+        self.assertEqual(1, self.mock_communicator.call_node.call_count)
+        self.assertEqual(
+            self.node, self.mock_communicator.call_node.call_args[0][0]
+        )
+        self.assertEqual(
+            "remote/booth_save_files",
+            self.mock_communicator.call_node.call_args[0][1]
+        )
+        data = url_decode(self.mock_communicator.call_node.call_args[0][2])
+        self.assertFalse("rewrite_existing" in data)
+        self.assertTrue("data_json" in data)
+        self.assertEqual(
+            [
+                {
+                    "name": "name2.conf",
+                    "data": "config2",
+                    "is_authfile": False
+                },
+                {
+                    "name": "file2.key",
+                    "data": to_b64("another key"),
+                    "is_authfile": True
+                }
+            ],
+            json.loads(data["data_json"][0])
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_DISTRIBUTION_STARTED,
+                    {}
+                ),
+                (
+                    Severities.WARNING,
+                    report_codes.BOOTH_SKIPPING_CONFIG,
+                    {
+                        "config_file": "name1.conf"
+                    }
+                ),
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+                    {
+                        "node": self.node.label,
+                        "name": "name2.conf, file2.key",
+                        "name_list": ["name2.conf", "file2.key"]
+                    }
+                )
+            ]
+        )
+
+
+class PullConfigFromNodeTest(TestCase):
+    def setUp(self):
+        self.mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        self.node = NodeAddresses("node")
+
+    def test_success(self):
+        self.mock_communicator.call_node.return_value = "{}"
+        self.assertEqual(
+            {}, lib.pull_config_from_node(
+                self.mock_communicator, self.node, "booth"
+            )
+        )
+        self.mock_communicator.call_node.assert_called_once_with(
+            self.node, "remote/booth_get_config", "name=booth"
+        )
+
+    def test_not_json(self):
+        self.mock_communicator.call_node.return_value = "not json"
+        assert_raise_library_error(
+            lambda: lib.pull_config_from_node(
+                self.mock_communicator, self.node, "booth"
+            ),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESPONSE_FORMAT,
+                {"node": self.node.label}
+            )
+        )
+
+    def test_communication_failure(self):
+        self.mock_communicator.call_node.side_effect = NodeConnectionException(
+            self.node.label, "command", "reason"
+        )
+        assert_raise_library_error(
+            lambda: lib.pull_config_from_node(
+                self.mock_communicator, self.node, "booth"
+            ),
+            (
+                Severities.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+                {
+                    "node": self.node.label,
+                    "command": "command",
+                    "reason": "reason"
+                }
+            )
+        )
diff --git a/pcs/lib/cib/alert.py b/pcs/lib/cib/alert.py
index 6b72996..b5fe88c 100644
--- a/pcs/lib/cib/alert.py
+++ b/pcs/lib/cib/alert.py
@@ -7,14 +7,16 @@ from __future__ import (
 
 from lxml import etree
 
+from pcs.common import report_codes
 from pcs.lib import reports
-from pcs.lib.errors import LibraryError
+from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
 from pcs.lib.cib.nvpair import update_nvset, get_nvset
 from pcs.lib.cib.tools import (
     check_new_id_applicable,
     get_sub_element,
     find_unique_id,
     get_alerts,
+    validate_id_does_not_exist,
 )
 
 
@@ -61,7 +63,7 @@ def _update_optional_attribute(element, attribute, value):
 def get_alert_by_id(tree, alert_id):
     """
     Returns alert element with specified id.
-    Raises AlertNotFound if alert with specified id doesn't exist.
+    Raises LibraryError if alert with specified id doesn't exist.
 
     tree -- cib etree node
     alert_id -- id of alert
@@ -72,25 +74,53 @@ def get_alert_by_id(tree, alert_id):
     return alert
 
 
-def get_recipient(alert, recipient_value):
+def get_recipient_by_id(tree, recipient_id):
     """
     Returns recipient element with value recipient_value which belong to
     specified alert.
-    Raises RecipientNotFound if recipient doesn't exist.
+    Raises LibraryError if recipient doesn't exist.
 
-    alert -- parent element of required recipient
-    recipient_value -- value of recipient
+    tree -- cib etree node
+    recipient_id -- id of recipient
     """
-    recipient = alert.find(
-        "./recipient[@value='{0}']".format(recipient_value)
+    recipient = get_alerts(tree).find(
+        "./alert/recipient[@id='{0}']".format(recipient_id)
     )
     if recipient is None:
-        raise LibraryError(reports.cib_alert_recipient_not_found(
-            alert.get("id"), recipient_value
-        ))
+        raise LibraryError(reports.id_not_found(recipient_id, "Recipient"))
     return recipient
 
 
+def ensure_recipient_value_is_unique(
+    reporter, alert, recipient_value, recipient_id="", allow_duplicity=False
+):
+    """
+    Ensures that recipient_value is unique in alert.
+
+    reporter -- report processor
+    alert -- alert
+    recipient_value -- recipient value
+    recipient_id -- recipient id of to which value belongs to
+    allow_duplicity -- if True only warning will be shown if value already
+        exists
+    """
+    recipient_list = alert.xpath(
+        "./recipient[@value='{value}' and @id!='{id}']".format(
+            value=recipient_value, id=recipient_id
+        )
+    )
+    if recipient_list:
+        reporter.process(reports.cib_alert_recipient_already_exists(
+            alert.get("id", None),
+            recipient_value,
+            Severities.WARNING if allow_duplicity else Severities.ERROR,
+            forceable=(
+                None if allow_duplicity
+                else report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE
+            )
+        ))
+
+
 def create_alert(tree, alert_id, path, description=""):
     """
     Create new alert element. Returns newly created element.
@@ -116,7 +146,7 @@ def create_alert(tree, alert_id, path, description=""):
 def update_alert(tree, alert_id, path, description=None):
     """
     Update existing alert. Return updated alert element.
-    Raises AlertNotFound if alert with specified id doesn't exist.
+    Raises LibraryError if alert with specified id doesn't exist.
 
     tree -- cib etree node
     alert_id -- id of alert to be updated
@@ -134,7 +164,7 @@ def update_alert(tree, alert_id, path, description=None):
 def remove_alert(tree, alert_id):
     """
     Remove alert with specified id.
-    Raises AlertNotFound if alert with specified id doesn't exist.
+    Raises LibraryError if alert with specified id doesn't exist.
 
     tree -- cib etree node
     alert_id -- id of alert which should be removed
@@ -144,36 +174,38 @@ def remove_alert(tree, alert_id):
 
 
 def add_recipient(
+    reporter,
     tree,
     alert_id,
     recipient_value,
-    description=""
+    recipient_id=None,
+    description="",
+    allow_same_value=False
 ):
     """
     Add recipient to alert with specified id. Returns added recipient element.
-    Raises AlertNotFound if alert with specified id doesn't exist.
+    Raises LibraryError if alert with specified recipient_id doesn't exist.
     Raises LibraryError if recipient already exists.
 
+    reporter -- report processor
     tree -- cib etree node
     alert_id -- id of alert which should be parent of new recipient
     recipient_value -- value of recipient
+    recipient_id -- id of new recipient, if None it will be generated
     description -- description of recipient
+    allow_same_value -- if True unique recipient value is not required
     """
-    alert = get_alert_by_id(tree, alert_id)
+    if recipient_id is None:
+        recipient_id = find_unique_id(tree, "{0}-recipient".format(alert_id))
+    else:
+        validate_id_does_not_exist(tree, recipient_id)
 
-    recipient = alert.find(
-        "./recipient[@value='{0}']".format(recipient_value)
+    alert = get_alert_by_id(tree, alert_id)
+    ensure_recipient_value_is_unique(
+        reporter, alert, recipient_value, allow_duplicity=allow_same_value
     )
-    if recipient is not None:
-        raise LibraryError(reports.cib_alert_recipient_already_exists(
-            alert_id, recipient_value
-        ))
-
     recipient = etree.SubElement(
-        alert,
-        "recipient",
-        id=find_unique_id(tree, "{0}-recipient".format(alert_id)),
-        value=recipient_value
+        alert, "recipient", id=recipient_id, value=recipient_value
     )
 
     if description:
@@ -182,38 +214,49 @@ def add_recipient(
     return recipient
 
 
-def update_recipient(tree, alert_id, recipient_value, description):
+def update_recipient(
+    reporter,
+    tree,
+    recipient_id,
+    recipient_value=None,
+    description=None,
+    allow_same_value=False
+):
     """
     Update specified recipient. Returns updated recipient element.
-    Raises AlertNotFound if alert with specified id doesn't exist.
-    Raises RecipientNotFound if recipient doesn't exist.
+    Raises LibraryError if recipient doesn't exist.
 
+    reporter -- report processor
     tree -- cib etree node
-    alert_id -- id of alert, parent element of recipient
-    recipient_value -- recipient value
+    recipient_id -- id of recipient to be updated
+    recipient_value -- recipient value, stay unchanged if None
     description -- description, if empty it will be removed, stay unchanged
         if None
+    allow_same_value -- if True unique recipient value is not required
     """
-    recipient = get_recipient(
-        get_alert_by_id(tree, alert_id), recipient_value
-    )
+    recipient = get_recipient_by_id(tree, recipient_id)
+    if recipient_value is not None:
+        ensure_recipient_value_is_unique(
+            reporter,
+            recipient.getparent(),
+            recipient_value,
+            recipient_id=recipient_id,
+            allow_duplicity=allow_same_value
+        )
+        recipient.set("value", recipient_value)
     _update_optional_attribute(recipient, "description", description)
     return recipient
 
 
-def remove_recipient(tree, alert_id, recipient_value):
+def remove_recipient(tree, recipient_id):
     """
     Remove specified recipient.
-    Raises AlertNotFound if alert with specified id doesn't exist.
-    Raises RecipientNotFound if recipient doesn't exist.
+    Raises LibraryError if recipient doesn't exist.
 
     tree -- cib etree node
-    alert_id -- id of alert, parent element of recipient
-    recipient_value -- recipient value
+    recipient_id -- id of recipient to be removed
     """
-    recipient = get_recipient(
-        get_alert_by_id(tree, alert_id), recipient_value
-    )
+    recipient = get_recipient_by_id(tree, recipient_id)
     recipient.getparent().remove(recipient)
 
 
diff --git a/pcs/lib/cib/constraint/ticket.py b/pcs/lib/cib/constraint/ticket.py
index 4154aac..85d045c 100644
--- a/pcs/lib/cib/constraint/ticket.py
+++ b/pcs/lib/cib/constraint/ticket.py
@@ -39,7 +39,8 @@ def _validate_options_common(options):
 def _create_id(cib, ticket, resource_id, resource_role):
     return tools.find_unique_id(
         cib,
-        "-".join(('ticket', ticket, resource_id, resource_role))
+        "-".join(('ticket', ticket, resource_id))
+        +("-{0}".format(resource_role) if resource_role else "")
     )
 
 def prepare_options_with_set(cib, options, resource_set_list):
@@ -93,7 +94,7 @@ def prepare_options_plain(cib, options, ticket, resource_id):
             cib,
             options["ticket"],
             resource_id,
-            options["rsc-role"] if "rsc-role" in options else "no-role"
+            options.get("rsc-role", "")
         ),
         partial(tools.check_new_id_applicable, cib, DESCRIPTION)
     )
@@ -103,6 +104,34 @@ def create_plain(constraint_section, options):
     element.attrib.update(options)
     return element
 
+def remove_plain(constraint_section, ticket_key, resource_id):
+    ticket_element_list = constraint_section.xpath(
+        './/rsc_ticket[@ticket="{0}" and @rsc="{1}"]'
+        .format(ticket_key, resource_id)
+    )
+
+    for ticket_element in ticket_element_list:
+        ticket_element.getparent().remove(ticket_element)
+
+    return len(ticket_element_list) > 0
+
+def remove_with_resource_set(constraint_section, ticket_key, resource_id):
+    ref_element_list = constraint_section.xpath(
+        './/rsc_ticket[@ticket="{0}"]/resource_set/resource_ref[@id="{1}"]'
+        .format(ticket_key, resource_id)
+    )
+
+    for ref_element in ref_element_list:
+        set_element = ref_element.getparent()
+        set_element.remove(ref_element)
+        if not len(set_element):
+            ticket_element = set_element.getparent()
+            ticket_element.remove(set_element)
+            if not len(ticket_element):
+                ticket_element.getparent().remove(ticket_element)
+
+    return len(ref_element_list) > 0
+
 def are_duplicate_plain(element, other_element):
     return all(
         element.attrib.get(name, "") == other_element.attrib.get(name, "")
diff --git a/pcs/lib/cib/test/test_alert.py b/pcs/lib/cib/test/test_alert.py
index c387aaf..c47dd1e 100644
--- a/pcs/lib/cib/test/test_alert.py
+++ b/pcs/lib/cib/test/test_alert.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from lxml import etree
 
@@ -15,8 +15,10 @@ from pcs.lib.errors import ReportItemSeverity as severities
 from pcs.test.tools.assertions import(
     assert_raise_library_error,
     assert_xml_equal,
+    assert_report_item_list_equal,
 )
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
 
 
 @mock.patch("pcs.lib.cib.alert.update_nvset")
@@ -129,54 +131,146 @@ class GetAlertByIdTest(TestCase):
         )
 
 
-class GetRecipientTest(TestCase):
+class GetRecipientByIdTest(TestCase):
     def setUp(self):
         self.xml = etree.XML(
             """
-                <alert id="alert-1">
-                    <recipient id="rec-1" value="value1"/>
-                    <recipient id="rec-2" value="value2"/>
-                    <not_recipient value="value3"/>
-                    <recipients>
-                        <recipient id="rec-4" value="value4"/>
-                    </recipients>
-                </alert>
+                <cib>
+                    <configuration>
+                        <alerts>
+                            <alert id="alert-1">
+                                <recipient id="rec-1" value="value1"/>
+                                <not_recipient id="rec-3" value="value3"/>
+                                <recipients>
+                                    <recipient id="rec-4" value="value4"/>
+                                </recipients>
+                            </alert>
+                            <recipient id="rec-2" value="value2"/>
+                        </alerts>
+                        <alert id="alert-2"/>
+                    </configuration>
+                </cib>
             """
         )
 
     def test_exist(self):
         assert_xml_equal(
-            '<recipient id="rec-2" value="value2"/>',
-            etree.tostring(alert.get_recipient(self.xml, "value2")).decode()
+            '<recipient id="rec-1" value="value1"/>',
+            etree.tostring(
+                alert.get_recipient_by_id(self.xml, "rec-1")
+            ).decode()
         )
 
     def test_different_place(self):
         assert_raise_library_error(
-            lambda: alert.get_recipient(self.xml, "value4"),
+            lambda: alert.get_recipient_by_id(self.xml, "rec-4"),
             (
                 severities.ERROR,
-                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+                report_codes.ID_NOT_FOUND,
                 {
-                    "alert": "alert-1",
-                    "recipient": "value4"
+                    "id": "rec-4",
+                    "id_description": "Recipient"
+                }
+            )
+        )
+
+    def test_not_in_alert(self):
+        assert_raise_library_error(
+            lambda: alert.get_recipient_by_id(self.xml, "rec-2"),
+            (
+                severities.ERROR,
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": "rec-2",
+                    "id_description": "Recipient"
                 }
             )
         )
 
     def test_not_recipient(self):
         assert_raise_library_error(
-            lambda: alert.get_recipient(self.xml, "value3"),
+            lambda: alert.get_recipient_by_id(self.xml, "rec-3"),
             (
                 severities.ERROR,
-                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+                report_codes.ID_NOT_FOUND,
                 {
-                    "alert": "alert-1",
-                    "recipient": "value3"
+                    "id": "rec-3",
+                    "id_description": "Recipient"
                 }
             )
         )
 
 
+class EnsureRecipientValueIsUniqueTest(TestCase):
+    def setUp(self):
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.alert = etree.Element("alert", id="alert-1")
+        self.recipient = etree.SubElement(
+            self.alert, "recipient", id="rec-1", value="value1"
+        )
+
+    def test_is_unique_no_duplicity_allowed(self):
+        alert.ensure_recipient_value_is_unique(
+            self.mock_reporter, self.alert, "value2"
+        )
+        self.assertEqual(0, len(self.mock_reporter.report_item_list))
+
+    def test_same_recipient_no_duplicity_allowed(self):
+        alert.ensure_recipient_value_is_unique(
+            self.mock_reporter, self.alert, "value1", recipient_id="rec-1"
+        )
+        self.assertEqual(0, len(self.mock_reporter.report_item_list))
+
+    def test_same_recipient_duplicity_allowed(self):
+        alert.ensure_recipient_value_is_unique(
+            self.mock_reporter, self.alert, "value1", recipient_id="rec-1",
+            allow_duplicity=True
+        )
+        self.assertEqual(0, len(self.mock_reporter.report_item_list))
+
+    def test_not_unique_no_duplicity_allowed(self):
+        report_item = (
+            severities.ERROR,
+            report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
+            {
+                "alert": "alert-1",
+                "recipient": "value1"
+            },
+            report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE
+        )
+        assert_raise_library_error(
+            lambda: alert.ensure_recipient_value_is_unique(
+                self.mock_reporter, self.alert, "value1"
+            ),
+            report_item
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list, [report_item]
+        )
+
+    def test_is_unique_duplicity_allowed(self):
+        alert.ensure_recipient_value_is_unique(
+            self.mock_reporter, self.alert, "value2", allow_duplicity=True
+        )
+        self.assertEqual(0, len(self.mock_reporter.report_item_list))
+
+    def test_not_unique_duplicity_allowed(self):
+        alert.ensure_recipient_value_is_unique(
+            self.mock_reporter, self.alert, "value1", allow_duplicity=True
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [(
+                severities.WARNING,
+                report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
+                {
+                    "alert": "alert-1",
+                    "recipient": "value1"
+                }
+            )]
+        )
+
+
 class CreateAlertTest(TestCase):
     def setUp(self):
         self.tree = etree.XML(
@@ -462,6 +556,7 @@ class RemoveAlertTest(TestCase):
 
 class AddRecipientTest(TestCase):
     def setUp(self):
+        self.mock_reporter = MockLibraryReportProcessor()
         self.tree = etree.XML(
             """
             <cib>
@@ -476,11 +571,40 @@ class AddRecipientTest(TestCase):
             """
         )
 
-    def test_success(self):
+    def test_with_id(self):
+        assert_xml_equal(
+            '<recipient id="my-recipient" value="value1"/>',
+            etree.tostring(
+                alert.add_recipient(
+                    self.mock_reporter, self.tree, "alert", "value1",
+                    "my-recipient"
+                )
+            ).decode()
+        )
+        assert_xml_equal(
+            """
+            <cib>
+                <configuration>
+                    <alerts>
+                        <alert id="alert" path="/path">
+                            <recipient id="alert-recipient" value="test_val"/>
+                            <recipient id="my-recipient" value="value1"/>
+                        </alert>
+                    </alerts>
+                </configuration>
+            </cib>
+            """,
+            etree.tostring(self.tree).decode()
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_without_id(self):
         assert_xml_equal(
             '<recipient id="alert-recipient-1" value="value1"/>',
             etree.tostring(
-                alert.add_recipient(self.tree, "alert", "value1")
+                alert.add_recipient(
+                    self.mock_reporter, self.tree, "alert", "value1"
+                )
             ).decode()
         )
         assert_xml_equal(
@@ -498,23 +622,85 @@ class AddRecipientTest(TestCase):
             """,
             etree.tostring(self.tree).decode()
         )
+        self.assertEqual([], self.mock_reporter.report_item_list)
 
-    def test_recipient_exist(self):
+    def test_id_exists(self):
         assert_raise_library_error(
-            lambda: alert.add_recipient(self.tree, "alert", "test_val"),
+            lambda: alert.add_recipient(
+                self.mock_reporter, self.tree, "alert", "value1",
+                "alert-recipient"
+            ),
             (
                 severities.ERROR,
+                report_codes.ID_ALREADY_EXISTS,
+                {"id": "alert-recipient"}
+            )
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_duplicity_of_value_not_allowed(self):
+        report_item = (
+            severities.ERROR,
+            report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
+            {
+                "alert": "alert",
+                "recipient": "test_val"
+            },
+            report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE
+        )
+        assert_raise_library_error(
+            lambda: alert.add_recipient(
+                self.mock_reporter, self.tree, "alert", "test_val"
+            ),
+            report_item
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [report_item]
+        )
+
+    def test_duplicity_of_value_allowed(self):
+        assert_xml_equal(
+            '<recipient id="alert-recipient-1" value="test_val"/>',
+            etree.tostring(
+                alert.add_recipient(
+                    self.mock_reporter, self.tree, "alert", "test_val",
+                    allow_same_value=True
+                )
+            ).decode()
+        )
+        assert_xml_equal(
+            """
+            <cib>
+                <configuration>
+                    <alerts>
+                        <alert id="alert" path="/path">
+                            <recipient id="alert-recipient" value="test_val"/>
+                            <recipient id="alert-recipient-1" value="test_val"/>
+                        </alert>
+                    </alerts>
+                </configuration>
+            </cib>
+            """,
+            etree.tostring(self.tree).decode()
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [(
+                severities.WARNING,
                 report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
                 {
-                    "recipient": "test_val",
-                    "alert": "alert"
+                    "alert": "alert",
+                    "recipient": "test_val"
                 }
-            )
+            )]
         )
 
     def test_alert_not_exist(self):
         assert_raise_library_error(
-            lambda: alert.add_recipient(self.tree, "alert1", "test_val"),
+            lambda: alert.add_recipient(
+                self.mock_reporter, self.tree, "alert1", "test_val"
+            ),
             (
                 severities.ERROR,
                 report_codes.CIB_ALERT_NOT_FOUND,
@@ -532,7 +718,8 @@ class AddRecipientTest(TestCase):
             />
             """,
             etree.tostring(alert.add_recipient(
-                self.tree, "alert", "value1", "desc"
+                self.mock_reporter, self.tree, "alert", "value1",
+                description="desc"
             )).decode()
         )
         assert_xml_equal(
@@ -554,10 +741,12 @@ class AddRecipientTest(TestCase):
             """,
             etree.tostring(self.tree).decode()
         )
+        self.assertEqual([], self.mock_reporter.report_item_list)
 
 
 class UpdateRecipientTest(TestCase):
     def setUp(self):
+        self.mock_reporter = MockLibraryReportProcessor()
         self.tree = etree.XML(
             """
             <cib>
@@ -577,6 +766,157 @@ class UpdateRecipientTest(TestCase):
             """
         )
 
+    def test_update_value(self):
+        assert_xml_equal(
+            """
+            <recipient id="alert-recipient" value="new_val"/>
+            """,
+            etree.tostring(alert.update_recipient(
+                self.mock_reporter, self.tree, "alert-recipient",
+                recipient_value="new_val"
+            )).decode()
+        )
+        assert_xml_equal(
+            """
+            <cib>
+                <configuration>
+                    <alerts>
+                        <alert id="alert" path="/path">
+                            <recipient id="alert-recipient" value="new_val"/>
+                            <recipient
+                                id="alert-recipient-1"
+                                value="value1"
+                                description="desc"
+                            />
+                        </alert>
+                    </alerts>
+                </configuration>
+            </cib>
+            """,
+            etree.tostring(self.tree).decode()
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_update_same_value_no_duplicity_allowed(self):
+        assert_xml_equal(
+            '<recipient id="alert-recipient" value="test_val"/>',
+            etree.tostring(alert.update_recipient(
+                self.mock_reporter, self.tree, "alert-recipient",
+                recipient_value="test_val"
+            )).decode()
+        )
+        assert_xml_equal(
+            """
+            <cib>
+                <configuration>
+                    <alerts>
+                        <alert id="alert" path="/path">
+                            <recipient id="alert-recipient" value="test_val"/>
+                            <recipient
+                                id="alert-recipient-1"
+                                value="value1"
+                                description="desc"
+                            />
+                        </alert>
+                    </alerts>
+                </configuration>
+            </cib>
+            """,
+            etree.tostring(self.tree).decode()
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_update_same_value_duplicity_allowed(self):
+        assert_xml_equal(
+            '<recipient id="alert-recipient" value="test_val"/>',
+            etree.tostring(alert.update_recipient(
+                self.mock_reporter, self.tree, "alert-recipient",
+                recipient_value="test_val", allow_same_value=True
+            )).decode()
+        )
+        assert_xml_equal(
+            """
+            <cib>
+                <configuration>
+                    <alerts>
+                        <alert id="alert" path="/path">
+                            <recipient id="alert-recipient" value="test_val"/>
+                            <recipient
+                                id="alert-recipient-1"
+                                value="value1"
+                                description="desc"
+                            />
+                        </alert>
+                    </alerts>
+                </configuration>
+            </cib>
+            """,
+            etree.tostring(self.tree).decode()
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_duplicity_of_value_not_allowed(self):
+        report_item = (
+            severities.ERROR,
+            report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
+            {
+                "alert": "alert",
+                "recipient": "value1"
+            },
+            report_codes.FORCE_ALERT_RECIPIENT_VALUE_NOT_UNIQUE
+        )
+        assert_raise_library_error(
+            lambda: alert.update_recipient(
+                self.mock_reporter, self.tree, "alert-recipient", "value1"
+            ),
+            report_item
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [report_item]
+        )
+
+    def test_duplicity_of_value_allowed(self):
+        assert_xml_equal(
+            """
+            <recipient id="alert-recipient" value="value1"/>
+            """,
+            etree.tostring(alert.update_recipient(
+                self.mock_reporter, self.tree, "alert-recipient",
+                recipient_value="value1", allow_same_value=True
+            )).decode()
+        )
+        assert_xml_equal(
+            """
+            <cib>
+                <configuration>
+                    <alerts>
+                        <alert id="alert" path="/path">
+                            <recipient id="alert-recipient" value="value1"/>
+                            <recipient
+                                id="alert-recipient-1"
+                                value="value1"
+                                description="desc"
+                            />
+                        </alert>
+                    </alerts>
+                </configuration>
+            </cib>
+            """,
+            etree.tostring(self.tree).decode()
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [(
+                severities.WARNING,
+                report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
+                {
+                    "alert": "alert",
+                    "recipient": "value1"
+                }
+            )]
+        )
+
     def test_add_description(self):
         assert_xml_equal(
             """
@@ -585,7 +925,8 @@ class UpdateRecipientTest(TestCase):
             />
             """,
             etree.tostring(alert.update_recipient(
-                self.tree, "alert", "test_val", "description"
+                self.mock_reporter, self.tree, "alert-recipient",
+                description="description"
             )).decode()
         )
         assert_xml_equal(
@@ -611,6 +952,7 @@ class UpdateRecipientTest(TestCase):
             """,
             etree.tostring(self.tree).decode()
         )
+        self.assertEqual([], self.mock_reporter.report_item_list)
 
     def test_update_description(self):
         assert_xml_equal(
@@ -620,7 +962,8 @@ class UpdateRecipientTest(TestCase):
             />
             """,
             etree.tostring(alert.update_recipient(
-                self.tree, "alert", "value1", "description"
+                self.mock_reporter, self.tree, "alert-recipient-1",
+                description="description"
             )).decode()
         )
         assert_xml_equal(
@@ -642,6 +985,7 @@ class UpdateRecipientTest(TestCase):
             """,
             etree.tostring(self.tree).decode()
         )
+        self.assertEqual([], self.mock_reporter.report_item_list)
 
     def test_remove_description(self):
         assert_xml_equal(
@@ -649,7 +993,10 @@ class UpdateRecipientTest(TestCase):
                 <recipient id="alert-recipient-1" value="value1"/>
             """,
             etree.tostring(
-               alert.update_recipient(self.tree, "alert", "value1", "")
+               alert.update_recipient(
+                   self.mock_reporter, self.tree, "alert-recipient-1",
+                   description=""
+               )
             ).decode()
         )
         assert_xml_equal(
@@ -667,26 +1014,18 @@ class UpdateRecipientTest(TestCase):
             """,
             etree.tostring(self.tree).decode()
         )
-
-    def test_alert_not_exists(self):
-        assert_raise_library_error(
-            lambda: alert.update_recipient(self.tree, "alert1", "test_val", ""),
-            (
-                severities.ERROR,
-                report_codes.CIB_ALERT_NOT_FOUND,
-                {"alert": "alert1"}
-            )
-        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
 
     def test_recipient_not_exists(self):
         assert_raise_library_error(
-            lambda: alert.update_recipient(self.tree, "alert", "unknown", ""),
+            lambda: alert.update_recipient(
+                self.mock_reporter, self.tree, "recipient"),
             (
                 severities.ERROR,
-                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+                report_codes.ID_NOT_FOUND,
                 {
-                    "alert": "alert",
-                    "recipient": "unknown"
+                    "id": "recipient",
+                    "id_description": "Recipient"
                 }
             )
         )
@@ -710,7 +1049,7 @@ class RemoveRecipientTest(TestCase):
         )
 
     def test_success(self):
-        alert.remove_recipient(self.tree, "alert", "val")
+        alert.remove_recipient(self.tree, "alert-recipient-2")
         assert_xml_equal(
             """
             <cib>
@@ -726,25 +1065,15 @@ class RemoveRecipientTest(TestCase):
             etree.tostring(self.tree).decode()
         )
 
-    def test_alert_not_exists(self):
-        assert_raise_library_error(
-            lambda: alert.remove_recipient(self.tree, "alert1", "test_val"),
-            (
-                severities.ERROR,
-                report_codes.CIB_ALERT_NOT_FOUND,
-                {"alert": "alert1"}
-            )
-        )
-
     def test_recipient_not_exists(self):
         assert_raise_library_error(
-            lambda: alert.remove_recipient(self.tree, "alert", "unknown"),
+            lambda: alert.remove_recipient(self.tree, "recipient"),
             (
                 severities.ERROR,
-                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+                report_codes.ID_NOT_FOUND,
                 {
-                    "alert": "alert",
-                    "recipient": "unknown"
+                    "id": "recipient",
+                    "id_description": "Recipient"
                 }
             )
         )
diff --git a/pcs/lib/cib/test/test_constraint.py b/pcs/lib/cib/test/test_constraint.py
index 961f8b0..a4ee636 100644
--- a/pcs/lib/cib/test/test_constraint.py
+++ b/pcs/lib/cib/test/test_constraint.py
@@ -6,7 +6,7 @@ from __future__ import (
 )
 
 from functools import partial
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from lxml import etree
 
@@ -18,7 +18,7 @@ from pcs.test.tools.assertions import(
     assert_xml_equal,
 )
 from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.assertions import (
     assert_report_item_list_equal,
 )
diff --git a/pcs/lib/cib/test/test_constraint_colocation.py b/pcs/lib/cib/test/test_constraint_colocation.py
index 377b981..6a85d8a 100644
--- a/pcs/lib/cib/test/test_constraint_colocation.py
+++ b/pcs/lib/cib/test/test_constraint_colocation.py
@@ -5,13 +5,13 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from pcs.common import report_codes
 from pcs.lib.cib.constraint import colocation
 from pcs.lib.errors import ReportItemSeverity as severities
 from pcs.test.tools.assertions import assert_raise_library_error
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 
 
 #Patch check_new_id_applicable is always desired when working with
diff --git a/pcs/lib/cib/test/test_constraint_order.py b/pcs/lib/cib/test/test_constraint_order.py
index 02d1c5f..3cb33d1 100644
--- a/pcs/lib/cib/test/test_constraint_order.py
+++ b/pcs/lib/cib/test/test_constraint_order.py
@@ -5,13 +5,13 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from pcs.common import report_codes
 from pcs.lib.cib.constraint import order
 from pcs.lib.errors import ReportItemSeverity as severities
 from pcs.test.tools.assertions import assert_raise_library_error
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 
 
 #Patch check_new_id_applicable is always desired when working with
diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py
index 87fd1e5..b720b55 100644
--- a/pcs/lib/cib/test/test_constraint_ticket.py
+++ b/pcs/lib/cib/test/test_constraint_ticket.py
@@ -6,13 +6,18 @@ from __future__ import (
 )
 
 from functools import partial
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
+
+from lxml import etree
 
 from pcs.common import report_codes
 from pcs.lib.cib.constraint import ticket
 from pcs.lib.errors import ReportItemSeverity as severities
-from pcs.test.tools.assertions import assert_raise_library_error
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_xml_equal,
+)
+from pcs.test.tools.pcs_unittest import mock
 
 
 @mock.patch("pcs.lib.cib.constraint.ticket.tools.check_new_id_applicable")
@@ -306,3 +311,130 @@ class AreDuplicateWithResourceSet(TestCase):
             Element({"ticket": "ticket_key"}),
             Element({"ticket": "X"}),
         ))
+
+class RemovePlainTest(TestCase):
+    def test_remove_tickets_constraints_for_resource(self):
+        constraint_section = etree.fromstring("""
+            <constraints>
+                <rsc_ticket id="t1" ticket="tA" rsc="rA"/>
+                <rsc_ticket id="t2" ticket="tA" rsc="rB"/>
+                <rsc_ticket id="t3" ticket="tA" rsc="rA"/>
+                <rsc_ticket id="t4" ticket="tB" rsc="rA"/>
+                <rsc_ticket id="t5" ticket="tB" rsc="rB"/>
+            </constraints>
+        """)
+
+        self.assertTrue(ticket.remove_plain(
+            constraint_section,
+            ticket_key="tA",
+            resource_id="rA",
+        ))
+
+        assert_xml_equal(etree.tostring(constraint_section).decode(), """
+            <constraints>
+                <rsc_ticket id="t2" ticket="tA" rsc="rB"/>
+                <rsc_ticket id="t4" ticket="tB" rsc="rA"/>
+                <rsc_ticket id="t5" ticket="tB" rsc="rB"/>
+            </constraints>
+        """)
+
+    def test_remove_nothing_when_no_matching_found(self):
+        constraint_section = etree.fromstring("""
+            <constraints>
+                <rsc_ticket id="t2" ticket="tA" rsc="rB"/>
+                <rsc_ticket id="t4" ticket="tB" rsc="rA"/>
+                <rsc_ticket id="t5" ticket="tB" rsc="rB"/>
+            </constraints>
+        """)
+
+        self.assertFalse(ticket.remove_plain(
+            constraint_section,
+            ticket_key="tA",
+            resource_id="rA",
+        ))
+
+        assert_xml_equal(etree.tostring(constraint_section).decode(), """
+            <constraints>
+                <rsc_ticket id="t2" ticket="tA" rsc="rB"/>
+                <rsc_ticket id="t4" ticket="tB" rsc="rA"/>
+                <rsc_ticket id="t5" ticket="tB" rsc="rB"/>
+            </constraints>
+        """)
+
+class RemoveWithSetTest(TestCase):
+    def test_remove_resource_references_and_empty_remaining_parents(self):
+        constraint_section = etree.fromstring("""
+            <constraints>
+                <rsc_ticket id="t1" ticket="tA">
+                    <resource_set id="rs1">
+                        <resource_ref id="rA"/>
+                    </resource_set>
+                    <resource_set id="rs2">
+                        <resource_ref id="rA"/>
+                    </resource_set>
+                </rsc_ticket>
+
+                <rsc_ticket id="t2" ticket="tA">
+                    <resource_set id="rs3">
+                        <resource_ref id="rA"/>
+                        <resource_ref id="rB"/>
+                    </resource_set>
+                    <resource_set id="rs4">
+                        <resource_ref id="rA"/>
+                    </resource_set>
+                </rsc_ticket>
+
+                <rsc_ticket id="t3" ticket="tB">
+                    <resource_set id="rs5">
+                        <resource_ref id="rA"/>
+                    </resource_set>
+                </rsc_ticket>
+            </constraints>
+        """)
+
+        self.assertTrue(ticket.remove_with_resource_set(
+            constraint_section,
+            ticket_key="tA",
+            resource_id="rA"
+        ))
+
+        assert_xml_equal(
+            """
+                <constraints>
+                    <rsc_ticket id="t2" ticket="tA">
+                        <resource_set id="rs3">
+                            <resource_ref id="rB"/>
+                        </resource_set>
+                    </rsc_ticket>
+
+                    <rsc_ticket id="t3" ticket="tB">
+                        <resource_set id="rs5">
+                            <resource_ref id="rA"/>
+                        </resource_set>
+                    </rsc_ticket>
+                </constraints>
+            """,
+            etree.tostring(constraint_section).decode()
+        )
+
+    def test_remove_nothing_when_no_matching_found(self):
+        constraint_section = etree.fromstring("""
+                <constraints>
+                    <rsc_ticket id="t2" ticket="tA">
+                        <resource_set id="rs3">
+                            <resource_ref id="rB"/>
+                        </resource_set>
+                    </rsc_ticket>
+
+                    <rsc_ticket id="t3" ticket="tB">
+                        <resource_set id="rs5">
+                            <resource_ref id="rA"/>
+                        </resource_set>
+                    </rsc_ticket>
+                </constraints>
+        """)
+        self.assertFalse(ticket.remove_with_resource_set(
+            constraint_section,
+            ticket_key="tA",
+            resource_id="rA"
+        ))
diff --git a/pcs/lib/cib/test/test_nvpair.py b/pcs/lib/cib/test/test_nvpair.py
index 6907f25..56ba4d1 100644
--- a/pcs/lib/cib/test/test_nvpair.py
+++ b/pcs/lib/cib/test/test_nvpair.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from lxml import etree
 
diff --git a/pcs/lib/cib/test/test_resource.py b/pcs/lib/cib/test/test_resource.py
index ef33ef6..c1e21a0 100644
--- a/pcs/lib/cib/test/test_resource.py
+++ b/pcs/lib/cib/test/test_resource.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 from lxml import etree
 from pcs.lib.cib.resource import find_by_id
 
diff --git a/pcs/lib/cib/test/test_resource_set.py b/pcs/lib/cib/test/test_resource_set.py
index 7b77ac4..e4fd8e4 100644
--- a/pcs/lib/cib/test/test_resource_set.py
+++ b/pcs/lib/cib/test/test_resource_set.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from lxml import etree
 
@@ -16,7 +16,7 @@ from pcs.test.tools.assertions import(
     assert_raise_library_error,
     assert_xml_equal
 )
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 
 
 class PrepareSetTest(TestCase):
diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
index b59d50d..6285931 100644
--- a/pcs/lib/cib/tools.py
+++ b/pcs/lib/cib/tools.py
@@ -11,6 +11,7 @@ import tempfile
 from lxml import etree
 
 from pcs import settings
+from pcs.common.tools import join_multilines
 from pcs.lib import reports
 from pcs.lib.errors import LibraryError
 from pcs.lib.pacemaker_values import validate_id
@@ -21,7 +22,16 @@ def does_id_exist(tree, check_id):
     tree cib etree node
     check_id id to check
     """
-    return tree.find('.//*[@id="{0}"]'.format(check_id)) is not None
+    # ElementTree has getroot, Element has getroottree
+    root = tree.getroot() if hasattr(tree, "getroot") else tree.getroottree()
+    # do not search in /cib/status, it may contain references to previously
+    # existing and deleted resources and thus preventing creating them again
+    existing = root.xpath(
+        '(/cib/*[name()!="status"]|/*[name()!="cib"])//*[@id="{0}"]'.format(
+            check_id
+        )
+    )
+    return len(existing) > 0
 
 def validate_id_does_not_exist(tree, id):
     """
@@ -91,6 +101,13 @@ def get_constraints(tree):
     """
     return _get_mandatory_section(tree, "configuration/constraints")
 
+def get_resources(tree):
+    """
+    Return 'resources' element from tree
+    tree cib etree node
+    """
+    return _get_mandatory_section(tree, "configuration/resources")
+
 def find_parent(element, tag_names):
     candidate = element
     while True:
@@ -160,29 +177,33 @@ def upgrade_cib(cib, runner):
     cib -- cib etree
     runner -- CommandRunner
     """
-    temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs")
-    temp_file.write(etree.tostring(cib).decode())
-    temp_file.flush()
-    output, retval = runner.run(
-        [
-            os.path.join(settings.pacemaker_binaries, "cibadmin"),
-            "--upgrade",
-            "--force"
-        ],
-        env_extend={"CIB_file": temp_file.name}
-    )
-
-    if retval != 0:
-        temp_file.close()
-        LibraryError(reports.cib_upgrade_failed(output))
-
+    temp_file = None
     try:
+        temp_file = tempfile.NamedTemporaryFile("w+", suffix=".pcs")
+        temp_file.write(etree.tostring(cib).decode())
+        temp_file.flush()
+        stdout, stderr, retval = runner.run(
+            [
+                os.path.join(settings.pacemaker_binaries, "cibadmin"),
+                "--upgrade",
+                "--force"
+            ],
+            env_extend={"CIB_file": temp_file.name}
+        )
+
+        if retval != 0:
+            temp_file.close()
+            raise LibraryError(
+                reports.cib_upgrade_failed(join_multilines([stderr, stdout]))
+            )
+
         temp_file.seek(0)
         return etree.fromstring(temp_file.read())
     except (EnvironmentError, etree.XMLSyntaxError, etree.DocumentInvalid) as e:
-        LibraryError(reports.cib_upgrade_failed(str(e)))
+        raise LibraryError(reports.cib_upgrade_failed(str(e)))
     finally:
-        temp_file.close()
+        if temp_file:
+            temp_file.close()
 
 
 def ensure_cib_version(runner, cib, version):
diff --git a/pcs/lib/commands/alert.py b/pcs/lib/commands/alert.py
index 7371fbc..432d9d5 100644
--- a/pcs/lib/commands/alert.py
+++ b/pcs/lib/commands/alert.py
@@ -90,7 +90,9 @@ def add_recipient(
     recipient_value,
     instance_attribute_dict,
     meta_attribute_dict,
-    description=None
+    recipient_id=None,
+    description=None,
+    allow_same_value=False
 ):
     """
     Add new recipient to alert witch id alert_id.
@@ -100,7 +102,9 @@ def add_recipient(
     recipient_value -- value of new recipient
     instance_attribute_dict -- dictionary of instance attributes to update
     meta_attribute_dict -- dictionary of meta attributes to update
+    recipient_id -- id of new recipient, if None it will be generated
     description -- recipient description
+    allow_same_value -- if True unique recipient value is not required
     """
     if not recipient_value:
         raise LibraryError(
@@ -109,7 +113,13 @@ def add_recipient(
 
     cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
     recipient = alert.add_recipient(
-        cib, alert_id, recipient_value, description
+        lib_env.report_processor,
+        cib,
+        alert_id,
+        recipient_value,
+        recipient_id=recipient_id,
+        description=description,
+        allow_same_value=allow_same_value
     )
     alert.update_instance_attributes(cib, recipient, instance_attribute_dict)
     alert.update_meta_attributes(cib, recipient, meta_attribute_dict)
@@ -119,26 +129,38 @@ def add_recipient(
 
 def update_recipient(
     lib_env,
-    alert_id,
-    recipient_value,
+    recipient_id,
     instance_attribute_dict,
     meta_attribute_dict,
-    description=None
+    recipient_value=None,
+    description=None,
+    allow_same_value=False
 ):
     """
     Update existing recipient.
 
     lib_env -- LibraryEnvironment
-    alert_id -- id of alert to which recipient belong
-    recipient_value -- recipient to be updated
+    recipient_id -- id of recipient to be updated
     instance_attribute_dict -- dictionary of instance attributes to update
     meta_attribute_dict -- dictionary of meta attributes to update
+    recipient_value -- new recipient value, if None old value will stay
+        unchanged
     description -- new description, if empty string, old description will be
         deleted, if None old value will stay unchanged
+    allow_same_value -- if True unique recipient value is not required
     """
+    if not recipient_value and recipient_value is not None:
+        raise LibraryError(
+            reports.cib_alert_recipient_invalid_value(recipient_value)
+        )
     cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
     recipient = alert.update_recipient(
-        cib, alert_id, recipient_value, description
+        lib_env.report_processor,
+        cib,
+        recipient_id,
+        recipient_value=recipient_value,
+        description=description,
+        allow_same_value=allow_same_value
     )
     alert.update_instance_attributes(cib, recipient, instance_attribute_dict)
     alert.update_meta_attributes(cib, recipient, meta_attribute_dict)
@@ -146,16 +168,15 @@ def update_recipient(
     lib_env.push_cib(cib)
 
 
-def remove_recipient(lib_env, alert_id, recipient_value):
+def remove_recipient(lib_env, recipient_id):
     """
     Remove existing recipient.
 
     lib_env -- LibraryEnvironment
-    alert_id -- id of alert to which recipient belong
-    recipient_value -- recipient to be removed
+    recipient_id -- id of recipient to be removed
     """
     cib = lib_env.get_cib(REQUIRED_CIB_VERSION)
-    alert.remove_recipient(cib, alert_id, recipient_value)
+    alert.remove_recipient(cib, recipient_id)
     lib_env.push_cib(cib)
 
 
diff --git a/pcs/lib/commands/booth.py b/pcs/lib/commands/booth.py
new file mode 100644
index 0000000..705900a
--- /dev/null
+++ b/pcs/lib/commands/booth.py
@@ -0,0 +1,383 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import base64
+import os.path
+from functools import partial
+
+from pcs import settings
+from pcs.common.tools import join_multilines
+from pcs.lib import external, reports
+from pcs.lib.booth import (
+    config_exchange,
+    config_files,
+    config_structure,
+    reports as booth_reports,
+    resource,
+    status,
+    sync,
+)
+from pcs.lib.booth.config_parser import parse, build
+from pcs.lib.booth.env import get_config_file_name
+from pcs.lib.cib.tools import get_resources
+from pcs.lib.errors import LibraryError, ReportItemSeverity
+from pcs.lib.node import NodeAddresses
+
+
+def config_setup(env, booth_configuration, overwrite_existing=False):
+    """
+    create booth configuration
+    list site_list contains site addresses of multisite
+    list arbitrator_list contains arbitrator addresses of multisite
+    """
+
+    config_content = config_exchange.from_exchange_format(booth_configuration)
+    config_structure.validate_peers(
+        *config_structure.take_peers(config_content)
+    )
+
+    env.booth.create_key(config_files.generate_key(), overwrite_existing)
+    config_content = config_structure.set_authfile(
+        config_content,
+        env.booth.key_path
+    )
+    env.booth.create_config(build(config_content), overwrite_existing)
+
+def config_destroy(env, ignore_config_load_problems=False):
+    env.booth.command_expect_live_env()
+    env.command_expect_live_corosync_env()
+
+    name = env.booth.name
+    config_is_used = partial(booth_reports.booth_config_is_used, name)
+
+    report_list = []
+
+    if(env.is_node_in_cluster() and resource.find_for_config(
+        get_resources(env.get_cib()),
+        get_config_file_name(name),
+    )):
+        report_list.append(config_is_used("in cluster resource"))
+
+    #Only systemd is currently supported. Initd does not support multiple
+    #instances (here specified by name)
+    if external.is_systemctl():
+        if external.is_service_running(env.cmd_runner(), "booth", name):
+            report_list.append(config_is_used("(running in systemd)"))
+
+        if external.is_service_enabled(env.cmd_runner(), "booth", name):
+            report_list.append(config_is_used("(enabled in systemd)"))
+
+    if report_list:
+        raise LibraryError(*report_list)
+
+    authfile_path = None
+    try:
+        authfile_path = config_structure.get_authfile(
+            parse(env.booth.get_config_content())
+        )
+    except LibraryError:
+        if not ignore_config_load_problems:
+            raise LibraryError(booth_reports.booth_cannot_identify_keyfile())
+
+        #even if the config content was not received or is not valid, the
+        #config file still needs to be removed
+        env.report_processor.process(
+            booth_reports.booth_cannot_identify_keyfile(
+                severity=ReportItemSeverity.WARNING
+            )
+        )
+
+    if(
+        authfile_path
+        and
+        os.path.dirname(authfile_path) == settings.booth_config_dir
+    ):
+        env.booth.set_key_path(authfile_path)
+        env.booth.remove_key()
+    env.booth.remove_config()
+
+
+def config_text(env, name, node_name=None):
+    """
+    get configuration in raw format
+    string name -- name of booth instance whose config should be returned
+    string node_name -- get the config from specified node or local host if None
+    """
+    if node_name is None:
+        # TODO add name support
+        return env.booth.get_config_content()
+
+    remote_data = sync.pull_config_from_node(
+        env.node_communicator(), NodeAddresses(node_name), name
+    )
+    try:
+        return remote_data["config"]["data"]
+    except KeyError:
+        raise LibraryError(reports.invalid_response_format(node_name))
+
+
+def config_ticket_add(env, ticket_name, options, allow_unknown_options):
+    """
+    add ticket to booth configuration
+    dict options contains options for ticket
+    bool allow_unknown_options decide if can be used options not listed in
+        ticket options nor global options
+    """
+    booth_configuration = config_structure.add_ticket(
+        env.report_processor,
+        parse(env.booth.get_config_content()),
+        ticket_name,
+        options,
+        allow_unknown_options,
+    )
+    env.booth.push_config(build(booth_configuration))
+
+def config_ticket_remove(env, ticket_name):
+    """
+    remove ticket from booth configuration
+    """
+    booth_configuration = config_structure.remove_ticket(
+        parse(env.booth.get_config_content()),
+        ticket_name
+    )
+    env.booth.push_config(build(booth_configuration))
+
+def create_in_cluster(env, name, ip, resource_create, resource_remove):
+    #TODO resource_create is provisional hack until resources are not moved to
+    #lib
+    resources_section = get_resources(env.get_cib())
+
+    booth_config_file_path = get_config_file_name(name)
+    if resource.find_for_config(resources_section, booth_config_file_path):
+        raise LibraryError(booth_reports.booth_already_in_cib(name))
+
+    resource.get_creator(resource_create, resource_remove)(
+        ip,
+        booth_config_file_path,
+        create_id = partial(
+            resource.create_resource_id,
+            resources_section,
+            name
+        )
+    )
+
+def remove_from_cluster(env, name, resource_remove, allow_remove_multiple):
+    #TODO resource_remove is provisional hack until resources are not moved to
+    #lib
+    resource.get_remover(resource_remove)(
+        _find_resource_elements_for_operation(env, name, allow_remove_multiple)
+    )
+
+def restart(env, name, resource_restart, allow_multiple):
+    #TODO resource_restart is provisional hack until resources are not moved to
+    #lib
+    for booth_element in _find_resource_elements_for_operation(
+        env, name, allow_multiple
+    ):
+        resource_restart([booth_element.attrib["id"]])
+
+def ticket_operation(operation, env, name, ticket, site_ip):
+    if not site_ip:
+        site_ip_list = resource.find_bound_ip(
+            get_resources(env.get_cib()),
+            get_config_file_name(name)
+        )
+        if len(site_ip_list) != 1:
+            raise LibraryError(
+                booth_reports.booth_cannot_determine_local_site_ip()
+            )
+        site_ip = site_ip_list[0]
+
+    stdout, stderr, return_code = env.cmd_runner().run([
+        settings.booth_binary, operation,
+        "-s", site_ip,
+        ticket
+    ])
+
+    if return_code != 0:
+        raise LibraryError(
+            booth_reports.booth_ticket_operation_failed(
+                operation,
+                join_multilines([stderr, stdout]),
+                site_ip,
+                ticket
+            )
+        )
+
+ticket_grant = partial(ticket_operation, "grant")
+ticket_revoke = partial(ticket_operation, "revoke")
+
+def config_sync(env, name, skip_offline_nodes=False):
+    """
+    Send specified local booth configuration to all nodes in cluster.
+
+    env -- LibraryEnvironment
+    name -- booth instance name
+    skip_offline_nodes -- if True offline nodes will be skipped
+    """
+    config = env.booth.get_config_content()
+    authfile_path = config_structure.get_authfile(parse(config))
+    authfile_content = config_files.read_authfile(
+        env.report_processor, authfile_path
+    )
+
+    sync.send_config_to_all_nodes(
+        env.node_communicator(),
+        env.report_processor,
+        env.get_corosync_conf().get_nodes(),
+        name,
+        config,
+        authfile=authfile_path,
+        authfile_data=authfile_content,
+        skip_offline=skip_offline_nodes
+    )
+
+
+def enable_booth(env, name=None):
+    """
+    Enable specified instance of booth service. Currently it is supported
+    only on systemd systems.
+
+    env -- LibraryEnvironment
+    name -- string, name of booth instance
+    """
+    external.ensure_is_systemd()
+    try:
+        external.enable_service(env.cmd_runner(), "booth", name)
+    except external.EnableServiceError as e:
+        raise LibraryError(reports.service_enable_error(
+            "booth", e.message, instance=name
+        ))
+    env.report_processor.process(reports.service_enable_success(
+        "booth", instance=name
+    ))
+
+
+def disable_booth(env, name=None):
+    """
+    Disable specified instance of booth service. Currently it is supported
+    only on systemd systems.
+
+    env -- LibraryEnvironment
+    name -- string, name of booth instance
+    """
+    external.ensure_is_systemd()
+    try:
+        external.disable_service(env.cmd_runner(), "booth", name)
+    except external.DisableServiceError as e:
+        raise LibraryError(reports.service_disable_error(
+            "booth", e.message, instance=name
+        ))
+    env.report_processor.process(reports.service_disable_success(
+        "booth", instance=name
+    ))
+
+
+def start_booth(env, name=None):
+    """
+    Start specified instance of booth service. Currently it is supported
+    only on systemd systems. On non-systemd systems it can be run like this:
+        BOOTH_CONF_FILE=<booth-file-path> /etc/initd/booth-arbitrator
+
+    env -- LibraryEnvironment
+    name -- string, name of booth instance
+    """
+    external.ensure_is_systemd()
+    try:
+        external.start_service(env.cmd_runner(), "booth", name)
+    except external.StartServiceError as e:
+        raise LibraryError(reports.service_start_error(
+            "booth", e.message, instance=name
+        ))
+    env.report_processor.process(reports.service_start_success(
+        "booth", instance=name
+    ))
+
+
+def stop_booth(env, name=None):
+    """
+    Stop specified instance of booth service. Currently it is supported
+    only on systemd systems.
+
+    env -- LibraryEnvironment
+    name -- string, name of booth instance
+    """
+    external.ensure_is_systemd()
+    try:
+        external.stop_service(env.cmd_runner(), "booth", name)
+    except external.StopServiceError as e:
+        raise LibraryError(reports.service_stop_error(
+            "booth", e.message, instance=name
+        ))
+    env.report_processor.process(reports.service_stop_success(
+        "booth", instance=name
+    ))
+
+
+def pull_config(env, node_name, name):
+    """
+    Get config from specified node and save it on local system. It will
+    rewrite existing files.
+
+    env -- LibraryEnvironment
+    node_name -- string, name of node from which config should be fetched
+    name -- string, name of booth instance of which config should be fetched
+    """
+    env.report_processor.process(
+        booth_reports.booth_fetching_config_from_node_started(node_name, name)
+    )
+    output = sync.pull_config_from_node(
+        env.node_communicator(), NodeAddresses(node_name), name
+    )
+    try:
+        env.booth.create_config(output["config"]["data"], True)
+        if (
+            output["authfile"]["name"] is not None and
+            output["authfile"]["data"]
+        ):
+            env.booth.set_key_path(os.path.join(
+                settings.booth_config_dir, output["authfile"]["name"]
+            ))
+            env.booth.create_key(
+                base64.b64decode(
+                    output["authfile"]["data"].encode("utf-8")
+                ),
+                True
+            )
+        env.report_processor.process(
+            booth_reports.booth_config_accepted_by_node(name_list=[name])
+        )
+    except KeyError:
+        raise LibraryError(reports.invalid_response_format(node_name))
+
+
+def get_status(env, name=None):
+    return {
+        "status": status.get_daemon_status(env.cmd_runner(), name),
+        "ticket": status.get_tickets_status(env.cmd_runner(), name),
+        "peers": status.get_peers_status(env.cmd_runner(), name),
+    }
+
+def _find_resource_elements_for_operation(env, name, allow_multiple):
+    booth_element_list = resource.find_for_config(
+        get_resources(env.get_cib()),
+        get_config_file_name(name),
+    )
+
+    if not booth_element_list:
+        raise LibraryError(booth_reports.booth_not_exists_in_cib(name))
+
+    if len(booth_element_list) > 1:
+        if not allow_multiple:
+            raise LibraryError(booth_reports.booth_multiple_times_in_cib(name))
+        env.report_processor.process(
+            booth_reports.booth_multiple_times_in_cib(
+                name,
+                severity=ReportItemSeverity.WARNING,
+            )
+        )
+
+    return booth_element_list
diff --git a/pcs/lib/commands/constraint/ticket.py b/pcs/lib/commands/constraint/ticket.py
index e6960d5..a14c5ad 100644
--- a/pcs/lib/commands/constraint/ticket.py
+++ b/pcs/lib/commands/constraint/ticket.py
@@ -68,3 +68,26 @@ def create(
     )
 
     env.push_cib(cib)
+
+def remove(env, ticket_key, resource_id):
+    """
+    remove all ticket constraint from resource
+    If resource is in resource set with another resources then only resource ref
+    is removed. If resource is alone in resource set whole constraint is removed.
+    """
+    cib = env.get_cib()
+    constraint_section = get_constraints(cib)
+    any_plain_removed = ticket.remove_plain(
+        constraint_section,
+        ticket_key,
+        resource_id
+    )
+    any_with_resource_set_removed = ticket.remove_with_resource_set(
+        constraint_section,
+        ticket_key,
+        resource_id
+    )
+
+    env.push_cib(cib)
+
+    return any_plain_removed or any_with_resource_set_removed
diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py
index 1d1d85f..119c51d 100644
--- a/pcs/lib/commands/qdevice.py
+++ b/pcs/lib/commands/qdevice.py
@@ -8,9 +8,10 @@ from __future__ import (
 import base64
 import binascii
 
+from pcs.common import report_codes
 from pcs.lib import external, reports
 from pcs.lib.corosync import qdevice_net
-from pcs.lib.errors import LibraryError
+from pcs.lib.errors import LibraryError, ReportItemSeverity
 
 
 def qdevice_setup(lib_env, model, enable, start):
@@ -31,13 +32,20 @@ def qdevice_setup(lib_env, model, enable, start):
     if start:
         _service_start(lib_env, qdevice_net.qdevice_start)
 
-def qdevice_destroy(lib_env, model):
+def qdevice_destroy(lib_env, model, proceed_if_used=False):
     """
     Stop and disable qdevice on local host and remove its configuration
     string model qdevice model to destroy
+    bool proceed_if_used destroy qdevice even if it is used by clusters
     """
     _ensure_not_cman(lib_env)
     _check_model(model)
+    _check_qdevice_not_used(
+        lib_env.report_processor,
+        lib_env.cmd_runner(),
+        model,
+        proceed_if_used
+    )
     _service_stop(lib_env, qdevice_net.qdevice_stop)
     _service_disable(lib_env, qdevice_net.qdevice_disable)
     qdevice_net.qdevice_destroy()
@@ -53,11 +61,16 @@ def qdevice_status_text(lib_env, model, verbose=False, cluster=None):
     _ensure_not_cman(lib_env)
     _check_model(model)
     runner = lib_env.cmd_runner()
-    return (
-        qdevice_net.qdevice_status_generic_text(runner, verbose)
-        +
-        qdevice_net.qdevice_status_cluster_text(runner, cluster, verbose)
-    )
+    try:
+        return (
+            qdevice_net.qdevice_status_generic_text(runner, verbose)
+            +
+            qdevice_net.qdevice_status_cluster_text(runner, cluster, verbose)
+        )
+    except qdevice_net.QnetdNotRunningException:
+        raise LibraryError(
+            reports.qdevice_not_running(model)
+        )
 
 def qdevice_enable(lib_env, model):
     """
@@ -83,12 +96,20 @@ def qdevice_start(lib_env, model):
     _check_model(model)
     _service_start(lib_env, qdevice_net.qdevice_start)
 
-def qdevice_stop(lib_env, model):
+def qdevice_stop(lib_env, model, proceed_if_used=False):
     """
     stop qdevice now on local host
+    string model qdevice model to stop
+    bool proceed_if_used stop qdevice even if it is used by clusters
     """
     _ensure_not_cman(lib_env)
     _check_model(model)
+    _check_qdevice_not_used(
+        lib_env.report_processor,
+        lib_env.cmd_runner(),
+        model,
+        proceed_if_used
+    )
     _service_stop(lib_env, qdevice_net.qdevice_stop)
 
 def qdevice_kill(lib_env, model):
@@ -176,6 +197,22 @@ def _check_model(model):
             reports.invalid_option_value("model", model, ["net"])
         )
 
+def _check_qdevice_not_used(reporter, runner, model, force=False):
+    _check_model(model)
+    connected_clusters = []
+    if model == "net":
+        try:
+            status = qdevice_net.qdevice_status_cluster_text(runner)
+            connected_clusters = qdevice_net.qdevice_connected_clusters(status)
+        except qdevice_net.QnetdNotRunningException:
+            pass
+    if connected_clusters:
+        reporter.process(reports.qdevice_used_by_clusters(
+            connected_clusters,
+            ReportItemSeverity.WARNING if force else ReportItemSeverity.ERROR,
+            None if force else report_codes.FORCE_QDEVICE_USED
+        ))
+
 def _service_start(lib_env, func):
     lib_env.report_processor.process(
         reports.service_start_started("quorum device")
diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py
index 7425e78..aa98e61 100644
--- a/pcs/lib/commands/quorum.py
+++ b/pcs/lib/commands/quorum.py
@@ -5,8 +5,9 @@ from __future__ import (
     unicode_literals,
 )
 
-from pcs.lib import reports
-from pcs.lib.errors import LibraryError
+from pcs.common import report_codes
+from pcs.lib import reports, sbd
+from pcs.lib.errors import LibraryError, ReportItemSeverity
 from pcs.lib.corosync import (
     live as corosync_live,
     qdevice_net,
@@ -39,16 +40,50 @@ def get_config(lib_env):
         "device": device,
     }
 
-def set_options(lib_env, options, skip_offline_nodes=False):
+
+def _check_if_atb_can_be_disabled(
+    runner, report_processor, corosync_conf, was_enabled, force=False
+):
+    """
+    Check whether auto_tie_breaker can be changed without affecting SBD.
+    Raises LibraryError if change of ATB will affect SBD functionality.
+
+    runner -- CommandRunner
+    report_processor -- report processor
+    corosync_conf -- corosync conf facade
+    was_enabled -- True if ATB was enabled, False otherwise
+    force -- force change
+    """
+    if (
+        was_enabled
+        and
+        not corosync_conf.is_enabled_auto_tie_breaker()
+        and
+        sbd.is_auto_tie_breaker_needed(runner, corosync_conf)
+    ):
+        report_processor.process(reports.quorum_cannot_disable_atb_due_to_sbd(
+            ReportItemSeverity.WARNING if force else ReportItemSeverity.ERROR,
+            None if force else report_codes.FORCE_OPTIONS
+        ))
+
+
+def set_options(lib_env, options, skip_offline_nodes=False, force=False):
     """
     Set corosync quorum options, distribute and reload corosync.conf if live
     lib_env LibraryEnvironment
     options quorum options (dict)
     skip_offline_nodes continue even if not all nodes are accessible
+    bool force force changes
     """
     __ensure_not_cman(lib_env)
     cfg = lib_env.get_corosync_conf()
+    atb_enabled = cfg.is_enabled_auto_tie_breaker()
     cfg.set_quorum_options(lib_env.report_processor, options)
+    if lib_env.is_corosync_conf_live:
+        _check_if_atb_can_be_disabled(
+            lib_env.cmd_runner(), lib_env.report_processor,
+            cfg, atb_enabled, force
+        )
     lib_env.push_corosync_conf(cfg, skip_offline_nodes)
 
 def status_text(lib_env):
@@ -248,14 +283,20 @@ def remove_device(lib_env, skip_offline_nodes=False):
     cfg = lib_env.get_corosync_conf()
     model, dummy_options, dummy_options = cfg.get_quorum_device_settings()
     cfg.remove_quorum_device()
-    lib_env.push_corosync_conf(cfg, skip_offline_nodes)
 
     if lib_env.is_corosync_conf_live:
+        communicator = lib_env.node_communicator()
+        # fix quorum options for SBD to work properly
+        if sbd.atb_has_to_be_enabled(lib_env.cmd_runner(), cfg):
+            lib_env.report_processor.process(reports.sbd_requires_atb())
+            cfg.set_quorum_options(
+                lib_env.report_processor, {"auto_tie_breaker": "1"}
+            )
+
         # disable qdevice
         lib_env.report_processor.process(
             reports.service_disable_started("corosync-qdevice")
         )
-        communicator = lib_env.node_communicator()
         parallel_nodes_communication_helper(
             qdevice_client.remote_client_disable,
             [
@@ -269,7 +310,6 @@ def remove_device(lib_env, skip_offline_nodes=False):
         lib_env.report_processor.process(
             reports.service_stop_started("corosync-qdevice")
         )
-        communicator = lib_env.node_communicator()
         parallel_nodes_communication_helper(
             qdevice_client.remote_client_stop,
             [
@@ -287,6 +327,8 @@ def remove_device(lib_env, skip_offline_nodes=False):
                 skip_offline_nodes
             )
 
+    lib_env.push_corosync_conf(cfg, skip_offline_nodes)
+
 def _remove_device_model_net(lib_env, cluster_nodes, skip_offline_nodes):
     """
     remove configuration used by qdevice model net
diff --git a/pcs/lib/commands/sbd.py b/pcs/lib/commands/sbd.py
index 875758f..2acb104 100644
--- a/pcs/lib/commands/sbd.py
+++ b/pcs/lib/commands/sbd.py
@@ -5,6 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
+import os
 import json
 
 from pcs import settings
@@ -44,7 +45,9 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
     """
 
     report_item_list = []
-    unsupported_sbd_option_list = ["SBD_WATCHDOG_DEV", "SBD_OPTS"]
+    unsupported_sbd_option_list = [
+        "SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER"
+    ]
     allowed_sbd_options = [
         "SBD_DELAY_START", "SBD_STARTMODE", "SBD_WATCHDOG_TIMEOUT"
     ]
@@ -62,6 +65,17 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
                 Severities.WARNING if allow_unknown_opts else Severities.ERROR,
                 None if allow_unknown_opts else report_codes.FORCE_OPTIONS
             ))
+    if "SBD_WATCHDOG_TIMEOUT" in sbd_config:
+        report_item = reports.invalid_option_value(
+            "SBD_WATCHDOG_TIMEOUT",
+            sbd_config["SBD_WATCHDOG_TIMEOUT"],
+            "nonnegative integer"
+        )
+        try:
+            if int(sbd_config["SBD_WATCHDOG_TIMEOUT"]) < 0:
+                report_item_list.append(report_item)
+        except (ValueError, TypeError):
+            report_item_list.append(report_item)
 
     return report_item_list
 
@@ -81,6 +95,9 @@ def _get_full_watchdog_list(node_list, default_watchdog, watchdog_dict):
     report_item_list = []
 
     for node_name, watchdog in watchdog_dict.items():
+        if not watchdog or not os.path.isabs(watchdog):
+            report_item_list.append(reports.invalid_watchdog_path(watchdog))
+            continue
         try:
             full_dict[node_list.find_by_label(node_name)] = watchdog
         except NodeNotFound:
@@ -140,6 +157,15 @@ def enable_sbd(
         full_watchdog_dict
     )
 
+    # enable ATB if needed
+    corosync_conf = lib_env.get_corosync_conf()
+    if sbd.atb_has_to_be_enabled_pre_enable_check(corosync_conf):
+        lib_env.report_processor.process(reports.sbd_requires_atb())
+        corosync_conf.set_quorum_options(
+            lib_env.report_processor, {"auto_tie_breaker": "1"}
+        )
+        lib_env.push_corosync_conf(corosync_conf, ignore_offline_nodes)
+
     # distribute SBD configuration
     config = sbd.get_default_sbd_config()
     config.update(sbd_options)
@@ -147,7 +173,8 @@ def enable_sbd(
         lib_env.report_processor,
         lib_env.node_communicator(),
         online_nodes,
-        config
+        config,
+        full_watchdog_dict
     )
 
     # remove cluster prop 'stonith_watchdog_timeout'
diff --git a/pcs/lib/commands/test/test_alert.py b/pcs/lib/commands/test/test_alert.py
index 34813df..bc68baf 100644
--- a/pcs/lib/commands/test/test_alert.py
+++ b/pcs/lib/commands/test/test_alert.py
@@ -8,9 +8,9 @@ from __future__ import (
 import logging
 from lxml import etree
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.assertions import (
     assert_raise_library_error,
     assert_xml_equal,
@@ -361,19 +361,17 @@ class AddRecipientTest(TestCase):
     def test_recipient_already_exists(self):
         assert_raise_library_error(
             lambda: cmd_alert.add_recipient(
-                self.mock_env, "alert", "value1", {}, {}
+                self.mock_env, "alert", "value1", {}, {},
+                recipient_id="alert-recipient"
             ),
             (
                 Severities.ERROR,
-                report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
-                {
-                    "recipient": "value1",
-                    "alert": "alert"
-                }
+                report_codes.ID_ALREADY_EXISTS,
+                {"id": "alert-recipient"}
             )
         )
 
-    def test_success(self):
+    def test_without_id(self):
         cmd_alert.add_recipient(
             self.mock_env,
             "alert",
@@ -424,6 +422,58 @@ class AddRecipientTest(TestCase):
             self.mock_env._get_cib_xml()
         )
 
+    def test_with_id(self):
+        cmd_alert.add_recipient(
+            self.mock_env,
+            "alert",
+            "value",
+            {"attr1": "val1"},
+            {
+                "attr2": "val2",
+                "attr1": "val1"
+            },
+            recipient_id="my-recipient"
+        )
+        assert_xml_equal(
+            """
+<cib validate-with="pacemaker-2.5">
+    <configuration>
+        <alerts>
+            <alert id="alert" path="path">
+                <recipient id="alert-recipient" value="value1"/>
+                <recipient id="my-recipient" value="value">
+                    <meta_attributes
+                        id="my-recipient-meta_attributes"
+                    >
+                        <nvpair
+                            id="my-recipient-meta_attributes-attr1"
+                            name="attr1"
+                            value="val1"
+                        />
+                        <nvpair
+                            id="my-recipient-meta_attributes-attr2"
+                            name="attr2"
+                            value="val2"
+                        />
+                    </meta_attributes>
+                    <instance_attributes
+                        id="my-recipient-instance_attributes"
+                    >
+                        <nvpair
+                            id="my-recipient-instance_attributes-attr1"
+                            name="attr1"
+                            value="val1"
+                        />
+                    </instance_attributes>
+                </recipient>
+            </alert>
+        </alerts>
+    </configuration>
+</cib>
+            """,
+            self.mock_env._get_cib_xml()
+        )
+
 
 class UpdateRecipientTest(TestCase):
     def setUp(self):
@@ -470,29 +520,29 @@ class UpdateRecipientTest(TestCase):
             self.mock_log, self.mock_rep, cib_data=cib
         )
 
-    def test_alert_not_found(self):
+    def test_empty_value(self):
         assert_raise_library_error(
             lambda: cmd_alert.update_recipient(
-                self.mock_env, "unknown", "recipient", {}, {}
+                self.mock_env, "alert-recipient-1", {}, {}, recipient_value=""
             ),
             (
                 Severities.ERROR,
-                report_codes.CIB_ALERT_NOT_FOUND,
-                {"alert": "unknown"}
+                report_codes.CIB_ALERT_RECIPIENT_VALUE_INVALID,
+                {"recipient": ""}
             )
         )
 
     def test_recipient_not_found(self):
         assert_raise_library_error(
             lambda: cmd_alert.update_recipient(
-                self.mock_env, "alert", "recipient", {}, {}
+                self.mock_env, "recipient", {}, {}
             ),
             (
                 Severities.ERROR,
-                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
+                report_codes.ID_NOT_FOUND,
                 {
-                    "recipient": "recipient",
-                    "alert": "alert"
+                    "id": "recipient",
+                    "id_description": "Recipient"
                 }
             )
         )
@@ -500,14 +550,14 @@ class UpdateRecipientTest(TestCase):
     def test_update_all(self):
         cmd_alert.update_recipient(
             self.mock_env,
-            "alert",
-            "value",
+            "alert-recipient-1",
             {"attr1": "value"},
             {
                 "attr1": "",
                 "attr3": "new_val"
             },
-            "desc"
+            recipient_value="new_val",
+            description="desc"
         )
         assert_xml_equal(
             """
@@ -518,7 +568,7 @@ class UpdateRecipientTest(TestCase):
                 <recipient id="alert-recipient" value="value1"/>
                 <recipient
                     id="alert-recipient-1"
-                    value="value"
+                    value="new_val"
                     description="desc"
                 >
                     <meta_attributes
@@ -575,35 +625,20 @@ class RemoveRecipientTest(TestCase):
             self.mock_log, self.mock_rep, cib_data=cib
         )
 
-    def test_alert_not_found(self):
-        assert_raise_library_error(
-            lambda: cmd_alert.remove_recipient(
-                self.mock_env, "unknown", "recipient"
-            ),
-            (
-                Severities.ERROR,
-                report_codes.CIB_ALERT_NOT_FOUND,
-                {"alert": "unknown"}
-            )
-        )
-
     def test_recipient_not_found(self):
         assert_raise_library_error(
             lambda: cmd_alert.remove_recipient(
-                self.mock_env, "alert", "recipient"
+                self.mock_env, "recipient"
             ),
             (
                 Severities.ERROR,
-                report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-                {
-                    "recipient": "recipient",
-                    "alert": "alert"
-                }
+                report_codes.ID_NOT_FOUND,
+                {"id": "recipient"}
             )
         )
 
     def test_success(self):
-        cmd_alert.remove_recipient(self.mock_env, "alert", "value1")
+        cmd_alert.remove_recipient(self.mock_env, "alert-recipient")
         assert_xml_equal(
             """
             <cib validate-with="pacemaker-2.5">
diff --git a/pcs/lib/commands/test/test_booth.py b/pcs/lib/commands/test/test_booth.py
new file mode 100644
index 0000000..6bcab2b
--- /dev/null
+++ b/pcs/lib/commands/test/test_booth.py
@@ -0,0 +1,614 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os
+import base64
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_report_item_list_equal,
+)
+from pcs.test.tools.misc import create_patcher
+
+from pcs import settings
+from pcs.common import report_codes
+from pcs.lib.env import LibraryEnvironment
+from pcs.lib.node import NodeAddresses
+from pcs.lib.errors import LibraryError, ReportItemSeverity as Severities
+from pcs.lib.commands import booth as commands
+from pcs.lib.external import (
+    NodeCommunicator,
+    CommandRunner,
+    EnableServiceError,
+    DisableServiceError,
+    StartServiceError,
+    StopServiceError
+)
+
+patch_commands = create_patcher("pcs.lib.commands.booth")
+
+ at mock.patch("pcs.lib.booth.config_files.generate_key", return_value="key value")
+ at mock.patch("pcs.lib.commands.booth.build", return_value="config content")
+ at mock.patch("pcs.lib.booth.config_structure.validate_peers")
+class ConfigSetupTest(TestCase):
+    def test_successfuly_build_and_write_to_std_path(
+        self, mock_validate_peers, mock_build, mock_generate_key
+    ):
+        env = mock.MagicMock()
+        commands.config_setup(
+            env,
+            booth_configuration=[
+                {"key": "site", "value": "1.1.1.1", "details": []},
+                {"key": "arbitrator", "value": "2.2.2.2", "details": []},
+            ],
+        )
+        env.booth.create_config.assert_called_once_with(
+            "config content",
+            False
+        )
+        env.booth.create_key.assert_called_once_with(
+            "key value",
+            False
+        )
+        mock_validate_peers.assert_called_once_with(
+            ["1.1.1.1"], ["2.2.2.2"]
+        )
+
+    def test_sanitize_peers_before_validation(
+        self, mock_validate_peers, mock_build, mock_generate_key
+    ):
+        commands.config_setup(env=mock.MagicMock(), booth_configuration={})
+        mock_validate_peers.assert_called_once_with([], [])
+
+
+class ConfigDestroyTest(TestCase):
+    @patch_commands("external.is_systemctl", mock.Mock(return_value=True))
+    @patch_commands("external.is_service_enabled", mock.Mock(return_value=True))
+    @patch_commands("external.is_service_running", mock.Mock(return_value=True))
+    @patch_commands("resource.find_for_config", mock.Mock(return_value=[True]))
+    def test_raises_when_booth_config_in_use(self):
+        env = mock.MagicMock()
+        env.booth.name = "somename"
+
+        assert_raise_library_error(
+            lambda: commands.config_destroy(env),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_CONFIG_IS_USED,
+                {
+                    "name": "somename",
+                    "detail": "in cluster resource",
+                }
+            ),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_CONFIG_IS_USED,
+                {
+                    "name": "somename",
+                    "detail": "(enabled in systemd)",
+                }
+            ),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_CONFIG_IS_USED,
+                {
+                    "name": "somename",
+                    "detail": "(running in systemd)",
+                }
+            )
+        )
+
+    @patch_commands("external.is_systemctl", mock.Mock(return_value=False))
+    @patch_commands("resource.find_for_config", mock.Mock(return_value=[]))
+    @patch_commands("parse", mock.Mock(side_effect=LibraryError()))
+    def test_raises_when_cannot_get_content_of_config(self):
+        env = mock.MagicMock()
+        assert_raise_library_error(
+            lambda: commands.config_destroy(env),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_CANNOT_IDENTIFY_KEYFILE,
+                {},
+                report_codes.FORCE_BOOTH_DESTROY
+            )
+        )
+
+    @patch_commands("external.is_systemctl", mock.Mock(return_value=False))
+    @patch_commands("resource.find_for_config", mock.Mock(return_value=[]))
+    @patch_commands("parse", mock.Mock(side_effect=LibraryError()))
+    def test_remove_config_even_if_cannot_get_its_content_when_forced(self):
+        env = mock.MagicMock()
+        env.report_processor = MockLibraryReportProcessor()
+        commands.config_destroy(env, ignore_config_load_problems=True)
+        env.booth.remove_config.assert_called_once_with()
+        assert_report_item_list_equal(env.report_processor.report_item_list, [
+            (
+                Severities.WARNING,
+                report_codes.BOOTH_CANNOT_IDENTIFY_KEYFILE,
+                {}
+            )
+        ])
+
+ at mock.patch("pcs.lib.commands.booth.config_structure.get_authfile")
+ at mock.patch("pcs.lib.commands.booth.parse")
+ at mock.patch("pcs.lib.booth.config_files.read_authfile")
+ at mock.patch("pcs.lib.booth.sync.send_config_to_all_nodes")
+class ConfigSyncTest(TestCase):
+    def setUp(self):
+        self.mock_env = mock.MagicMock()
+        self.mock_rep = MockLibraryReportProcessor()
+        self.mock_env.report_processor = self.mock_rep
+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        self.mock_env.node_communicator.return_value = self.mock_com
+        self.node_list = ["node1", "node2", "node3"]
+        corosync_conf = mock.MagicMock()
+        corosync_conf.get_nodes.return_value = self.node_list
+        self.mock_env.get_corosync_conf.return_value = corosync_conf
+        self.mock_env.booth.get_config_content.return_value = "config"
+
+    def test_skip_offline(
+        self, mock_sync, mock_read_key, mock_parse, mock_get_authfile
+    ):
+        mock_get_authfile.return_value = "/key/path.key"
+        mock_read_key.return_value = "key"
+        commands.config_sync(self.mock_env, "name", True)
+        self.mock_env.booth.get_config_content.assert_called_once_with()
+        mock_read_key.assert_called_once_with(self.mock_rep, "/key/path.key")
+        mock_parse.assert_called_once_with("config")
+        mock_sync.assert_called_once_with(
+            self.mock_com,
+            self.mock_rep,
+            self.node_list,
+            "name",
+            "config",
+            authfile="/key/path.key",
+            authfile_data="key",
+            skip_offline=True
+        )
+
+    def test_do_not_skip_offline(
+        self, mock_sync, mock_read_key, mock_parse, mock_get_authfile
+    ):
+        mock_get_authfile.return_value = "/key/path.key"
+        mock_read_key.return_value = "key"
+        commands.config_sync(self.mock_env, "name")
+        self.mock_env.booth.get_config_content.assert_called_once_with()
+        mock_read_key.assert_called_once_with(self.mock_rep, "/key/path.key")
+        mock_parse.assert_called_once_with("config")
+        mock_sync.assert_called_once_with(
+            self.mock_com,
+            self.mock_rep,
+            self.node_list,
+            "name",
+            "config",
+            authfile="/key/path.key",
+            authfile_data="key",
+            skip_offline=False
+        )
+
+
+ at mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd")
+ at mock.patch("pcs.lib.external.enable_service")
+class EnableBoothTest(TestCase):
+    def setUp(self):
+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
+        self.mock_rep = MockLibraryReportProcessor()
+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+        self.mock_env.cmd_runner.return_value = self.mock_run
+        self.mock_env.report_processor = self.mock_rep
+
+    def test_success(self, mock_enable, mock_is_systemctl):
+        commands.enable_booth(self.mock_env, "name")
+        mock_enable.assert_called_once_with(self.mock_run, "booth", "name")
+        mock_is_systemctl.assert_called_once_with()
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.SERVICE_ENABLE_SUCCESS,
+                {
+                    "service": "booth",
+                    "node": None,
+                    "instance": "name",
+                }
+            )]
+        )
+
+    def test_failed(self, mock_enable, mock_is_systemctl):
+        mock_enable.side_effect = EnableServiceError("booth", "msg", "name")
+        assert_raise_library_error(
+            lambda: commands.enable_booth(self.mock_env, "name"),
+            (
+                Severities.ERROR,
+                report_codes.SERVICE_ENABLE_ERROR,
+                {
+                    "service": "booth",
+                    "reason": "msg",
+                    "node": None,
+                    "instance": "name",
+                }
+            )
+        )
+        mock_enable.assert_called_once_with(self.mock_run, "booth", "name")
+        mock_is_systemctl.assert_called_once_with()
+
+
+ at mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd")
+ at mock.patch("pcs.lib.external.disable_service")
+class DisableBoothTest(TestCase):
+    def setUp(self):
+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
+        self.mock_rep = MockLibraryReportProcessor()
+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+        self.mock_env.cmd_runner.return_value = self.mock_run
+        self.mock_env.report_processor = self.mock_rep
+
+    def test_success(self, mock_disable, mock_is_systemctl):
+        commands.disable_booth(self.mock_env, "name")
+        mock_disable.assert_called_once_with(self.mock_run, "booth", "name")
+        mock_is_systemctl.assert_called_once_with()
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.SERVICE_DISABLE_SUCCESS,
+                {
+                    "service": "booth",
+                    "node": None,
+                    "instance": "name",
+                }
+            )]
+        )
+
+    def test_failed(self, mock_disable, mock_is_systemctl):
+        mock_disable.side_effect = DisableServiceError("booth", "msg", "name")
+        assert_raise_library_error(
+            lambda: commands.disable_booth(self.mock_env, "name"),
+            (
+                Severities.ERROR,
+                report_codes.SERVICE_DISABLE_ERROR,
+                {
+                    "service": "booth",
+                    "reason": "msg",
+                    "node": None,
+                    "instance": "name",
+                }
+            )
+        )
+        mock_disable.assert_called_once_with(self.mock_run, "booth", "name")
+        mock_is_systemctl.assert_called_once_with()
+
+
+ at mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd")
+ at mock.patch("pcs.lib.external.start_service")
+class StartBoothTest(TestCase):
+    def setUp(self):
+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
+        self.mock_rep = MockLibraryReportProcessor()
+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+        self.mock_env.cmd_runner.return_value = self.mock_run
+        self.mock_env.report_processor = self.mock_rep
+
+    def test_success(self, mock_start, mock_is_systemctl):
+        commands.start_booth(self.mock_env, "name")
+        mock_start.assert_called_once_with(self.mock_run, "booth", "name")
+        mock_is_systemctl.assert_called_once_with()
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.SERVICE_START_SUCCESS,
+                {
+                    "service": "booth",
+                    "node": None,
+                    "instance": "name",
+                }
+            )]
+        )
+
+    def test_failed(self, mock_start, mock_is_systemctl):
+        mock_start.side_effect = StartServiceError("booth", "msg", "name")
+        assert_raise_library_error(
+            lambda: commands.start_booth(self.mock_env, "name"),
+            (
+                Severities.ERROR,
+                report_codes.SERVICE_START_ERROR,
+                {
+                    "service": "booth",
+                    "reason": "msg",
+                    "node": None,
+                    "instance": "name",
+                }
+            )
+        )
+        mock_start.assert_called_once_with(self.mock_run, "booth", "name")
+        mock_is_systemctl.assert_called_once_with()
+
+
+ at mock.patch("pcs.lib.commands.booth.external.ensure_is_systemd")
+ at mock.patch("pcs.lib.external.stop_service")
+class StopBoothTest(TestCase):
+    def setUp(self):
+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
+        self.mock_rep = MockLibraryReportProcessor()
+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+        self.mock_env.cmd_runner.return_value = self.mock_run
+        self.mock_env.report_processor = self.mock_rep
+
+    def test_success(self, mock_stop, mock_is_systemctl):
+        commands.stop_booth(self.mock_env, "name")
+        mock_stop.assert_called_once_with(self.mock_run, "booth", "name")
+        mock_is_systemctl.assert_called_once_with()
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.SERVICE_STOP_SUCCESS,
+                {
+                    "service": "booth",
+                    "node": None,
+                    "instance": "name",
+                }
+            )]
+        )
+
+    def test_failed(self, mock_stop, mock_is_systemctl):
+        mock_stop.side_effect = StopServiceError("booth", "msg", "name")
+        assert_raise_library_error(
+            lambda: commands.stop_booth(self.mock_env, "name"),
+            (
+                Severities.ERROR,
+                report_codes.SERVICE_STOP_ERROR,
+                {
+                    "service": "booth",
+                    "reason": "msg",
+                    "node": None,
+                    "instance": "name",
+                }
+            )
+        )
+        mock_stop.assert_called_once_with(self.mock_run, "booth", "name")
+        mock_is_systemctl.assert_called_once_with()
+
+
+ at mock.patch("pcs.lib.booth.sync.pull_config_from_node")
+class PullConfigTest(TestCase):
+    def setUp(self):
+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
+        self.mock_rep = MockLibraryReportProcessor()
+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        self.mock_env.node_communicator.return_value = self.mock_com
+        self.mock_env.report_processor = self.mock_rep
+
+    def test_with_authfile(self, mock_pull):
+        mock_pull.return_value = {
+            "config": {
+                "name": "name.conf",
+                "data": "config"
+            },
+            "authfile": {
+                "name": "name.key",
+                "data": base64.b64encode("key".encode("utf-8")).decode("utf-8")
+            }
+        }
+        commands.pull_config(self.mock_env, "node", "name")
+        mock_pull.assert_called_once_with(
+            self.mock_com, NodeAddresses("node"), "name"
+        )
+        self.mock_env.booth.create_config.called_once_with("config", True)
+        self.mock_env.booth.set_key_path.called_once_with(os.path.join(
+            settings.booth_config_dir, "name.key"
+        ))
+        self.mock_env.booth.create_key.called_once_with(
+            "key".encode("utf-8"), True
+        )
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
+                    {
+                        "node": "node",
+                        "config": "name"
+                    }
+                ),
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+                    {
+                        "node": None,
+                        "name": "name",
+                        "name_list": ["name"]
+                    }
+                )
+            ]
+        )
+
+    def test_without_authfile(self, mock_pull):
+        mock_pull.return_value = {
+            "config": {
+                "name": "name.conf",
+                "data": "config"
+            },
+            "authfile": {
+                "name": None,
+                "data": None
+            }
+        }
+        commands.pull_config(self.mock_env, "node", "name")
+        mock_pull.assert_called_once_with(
+            self.mock_com, NodeAddresses("node"), "name"
+        )
+        self.mock_env.booth.create_config.called_once_with("config", True)
+        self.assertEqual(0, self.mock_env.booth.set_key_path.call_count)
+        self.assertEqual(0, self.mock_env.booth.create_key.call_count)
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
+                    {
+                        "node": "node",
+                        "config": "name"
+                    }
+                ),
+                (
+                    Severities.INFO,
+                    report_codes.BOOTH_CONFIG_ACCEPTED_BY_NODE,
+                    {
+                        "node": None,
+                        "name": "name",
+                        "name_list": ["name"]
+                    }
+                )
+            ]
+        )
+
+    def test_invalid_input(self, mock_pull):
+        mock_pull.return_value = {}
+        assert_raise_library_error(
+            lambda: commands.pull_config(self.mock_env, "node", "name"),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESPONSE_FORMAT,
+                {"node": "node"}
+            )
+        )
+        mock_pull.assert_called_once_with(
+            self.mock_com, NodeAddresses("node"), "name"
+        )
+        self.assertEqual(0, self.mock_env.booth.create_config.call_count)
+        self.assertEqual(0, self.mock_env.booth.set_key_path.call_count)
+        self.assertEqual(0, self.mock_env.booth.create_key.call_count)
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.BOOTH_FETCHING_CONFIG_FROM_NODE,
+                {
+                    "node": "node",
+                    "config": "name"
+                }
+            )]
+        )
+
+class TicketOperationTest(TestCase):
+    @mock.patch("pcs.lib.booth.resource.find_bound_ip")
+    def test_raises_when_implicit_site_not_found_in_cib(
+        self, mock_find_bound_ip
+    ):
+        mock_find_bound_ip.return_value = []
+        assert_raise_library_error(
+            lambda: commands.ticket_operation(
+                "grant", mock.Mock(), "booth", "ABC", site_ip=None
+            ),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_CANNOT_DETERMINE_LOCAL_SITE_IP,
+                {}
+            ),
+        )
+
+    def test_raises_when_command_fail(self):
+        mock_run = mock.Mock(return_value=("some message", "error", 1))
+        mock_env = mock.MagicMock(
+            cmd_runner=mock.Mock(return_value=mock.MagicMock(run=mock_run))
+        )
+        assert_raise_library_error(
+            lambda: commands.ticket_operation(
+                "grant", mock_env, "booth", "ABC", site_ip="1.2.3.4"
+            ),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_TICKET_OPERATION_FAILED,
+                {
+                    "operation": "grant",
+                    "reason": "error\nsome message",
+                    "site_ip": "1.2.3.4",
+                    "ticket_name": "ABC",
+                }
+            ),
+        )
+
+class CreateInClusterTest(TestCase):
+    @patch_commands("get_resources", mock.MagicMock())
+    def test_raises_when_is_created_already(self):
+        assert_raise_library_error(
+            lambda: commands.create_in_cluster(
+                mock.MagicMock(), "somename", ip="1.2.3.4",
+                resource_create=None, resource_remove=None,
+            ),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_ALREADY_IN_CIB,
+                {
+                    "name": "somename",
+                }
+            ),
+        )
+
+class FindResourceElementsForOperationTest(TestCase):
+    @patch_commands("resource.find_for_config", mock.Mock(return_value=[]))
+    def test_raises_when_no_booth_resource_found(self):
+        assert_raise_library_error(
+            lambda: commands._find_resource_elements_for_operation(
+                mock.MagicMock(),
+                "somename",
+                allow_multiple=False
+            ),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_NOT_EXISTS_IN_CIB,
+                {
+                    'name': 'somename',
+                }
+            ),
+        )
+
+    @patch_commands(
+        "resource.find_for_config", mock.Mock(return_value=["b_el1", "b_el2"])
+    )
+    def test_raises_when_multiple_booth_resource_found(self):
+        assert_raise_library_error(
+            lambda: commands._find_resource_elements_for_operation(
+                mock.MagicMock(),
+                "somename",
+                allow_multiple=False
+            ),
+            (
+                Severities.ERROR,
+                report_codes.BOOTH_MULTIPLE_TIMES_IN_CIB,
+                {
+                    'name': 'somename',
+                },
+                report_codes.FORCE_BOOTH_REMOVE_FROM_CIB,
+            ),
+        )
+
+    @patch_commands("get_resources", mock.Mock(return_value="resources"))
+    @patch_commands("resource.get_remover", mock.MagicMock())
+    @patch_commands("resource.find_for_config", mock.Mock(return_value=[1, 2]))
+    def test_warn_when_multiple_booth_resources_removed(self):
+        report_processor=MockLibraryReportProcessor()
+        commands._find_resource_elements_for_operation(
+            mock.MagicMock(report_processor=report_processor),
+            "somename",
+            allow_multiple=True,
+        )
+        assert_report_item_list_equal(report_processor.report_item_list, [(
+            Severities.WARNING,
+            report_codes.BOOTH_MULTIPLE_TIMES_IN_CIB,
+            {
+                'name': 'somename',
+            },
+        )])
diff --git a/pcs/lib/commands/test/test_constraint_common.py b/pcs/lib/commands/test/test_constraint_common.py
index e0872ff..cb5e177 100644
--- a/pcs/lib/commands/test/test_constraint_common.py
+++ b/pcs/lib/commands/test/test_constraint_common.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from lxml import etree
 
@@ -17,7 +17,7 @@ from pcs.test.tools.assertions import(
     assert_xml_equal,
 )
 from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 
 
 def fixture_cib_and_constraints():
diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py
index 751001b..edf592a 100644
--- a/pcs/lib/commands/test/test_ticket.py
+++ b/pcs/lib/commands/test/test_ticket.py
@@ -5,27 +5,25 @@ from __future__ import (
     unicode_literals,
 )
 
-import logging
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.misc import create_patcher
 
 from pcs.common import report_codes
 from pcs.lib.commands.constraint import ticket as ticket_command
-from pcs.lib.env import LibraryEnvironment as Env
 from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.lib.test.misc import get_mocked_env
 from pcs.test.tools.assertions import (
     assert_xml_equal,
     assert_raise_library_error
 )
-from pcs.test.tools.custom_mock import MockLibraryReportProcessor
 from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.pcs_mock import mock
 from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
 
+patch_commands = create_patcher("pcs.lib.commands.constraint.ticket")
 
 class CreateTest(TestCase):
     def setUp(self):
-        self.mock_logger = mock.MagicMock(logging.Logger)
-        self.mock_reporter = MockLibraryReportProcessor()
         self.create_cib = get_xml_manipulation_creator_from_file(
             rc("cib-empty.xml")
         )
@@ -37,7 +35,7 @@ class CreateTest(TestCase):
                 .append_to_first_tag_name('resources', resource_xml)
         )
 
-        env = Env(self.mock_logger, self.mock_reporter, cib_data=str(cib))
+        env = get_mocked_env(cib_data=str(cib))
         ticket_command.create(env, "ticketA", "resourceA", {
             "loss-policy": "fence",
             "rsc-role": "master"
@@ -59,11 +57,7 @@ class CreateTest(TestCase):
         )
 
     def test_refuse_for_nonexisting_resource(self):
-        env = Env(
-            self.mock_logger,
-            self.mock_reporter,
-            cib_data=str(self.create_cib())
-        )
+        env = get_mocked_env(cib_data=str(self.create_cib()))
         assert_raise_library_error(
             lambda: ticket_command.create(
                 env, "ticketA", "resourceA", "master", {"loss-policy": "fence"}
@@ -74,3 +68,20 @@ class CreateTest(TestCase):
                 {"resource_id": "resourceA"},
             ),
         )
+
+ at patch_commands("get_constraints", mock.Mock)
+class RemoveTest(TestCase):
+    @patch_commands("ticket.remove_plain", mock.Mock(return_value=1))
+    @patch_commands("ticket.remove_with_resource_set",mock.Mock(return_value=0))
+    def test_successfully_remove_plain(self):
+        self.assertTrue(ticket_command.remove(mock.MagicMock(), "T", "R"))
+
+    @patch_commands("ticket.remove_plain", mock.Mock(return_value=0))
+    @patch_commands("ticket.remove_with_resource_set",mock.Mock(return_value=1))
+    def test_successfully_remove_with_resource_set(self):
+        self.assertTrue(ticket_command.remove(mock.MagicMock(), "T", "R"))
+
+    @patch_commands("ticket.remove_plain", mock.Mock(return_value=0))
+    @patch_commands("ticket.remove_with_resource_set",mock.Mock(return_value=0))
+    def test_raises_library_error_when_no_matching_constraint_found(self):
+        self.assertFalse(ticket_command.remove(mock.MagicMock(), "T", "R"))
diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py
index 600a89b..be621c0 100644
--- a/pcs/lib/corosync/config_facade.py
+++ b/pcs/lib/corosync/config_facade.py
@@ -129,6 +129,16 @@ class ConfigFacade(object):
                     options[name] = value
         return options
 
+    def is_enabled_auto_tie_breaker(self):
+        """
+        Returns True if the auto tie breaker option is enabled, False otherwise.
+        """
+        auto_tie_breaker = "0"
+        for quorum in self.config.get_sections("quorum"):
+            for attr in quorum.get_attributes("auto_tie_breaker"):
+                auto_tie_breaker = attr[1]
+        return auto_tie_breaker == "1"
+
     def __validate_quorum_options(self, options):
         report_items = []
         has_qdevice = self.has_quorum_device()
@@ -488,10 +498,7 @@ class ConfigFacade(object):
         # get relevant status
         has_quorum_device = self.has_quorum_device()
         has_two_nodes = len(self.get_nodes()) == 2
-        auto_tie_breaker = False
-        for quorum in self.config.get_sections("quorum"):
-            for attr in quorum.get_attributes("auto_tie_breaker"):
-                auto_tie_breaker = attr[1] != "0"
+        auto_tie_breaker = self.is_enabled_auto_tie_breaker()
         # update two_node
         if has_two_nodes and not auto_tie_breaker and not has_quorum_device:
             quorum_section_list = self.__ensure_section(self.config, "quorum")
diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py
index b49b9f6..67aa0e4 100644
--- a/pcs/lib/corosync/live.py
+++ b/pcs/lib/corosync/live.py
@@ -8,6 +8,7 @@ from __future__ import (
 import os.path
 
 from pcs import settings
+from pcs.common.tools import join_multilines
 from pcs.lib import reports
 from pcs.lib.errors import LibraryError
 from pcs.lib.external import NodeCommunicator
@@ -22,6 +23,9 @@ def get_local_corosync_conf():
     except IOError as e:
         raise LibraryError(reports.corosync_config_read_error(path, e.strerror))
 
+def exists_local_corosync_conf():
+    return os.path.exists(settings.corosync_conf_file)
+
 def set_remote_corosync_conf(node_communicator, node_addr, config_text):
     """
     Send corosync.conf to a node
@@ -38,42 +42,39 @@ def reload_config(runner):
     """
     Ask corosync to reload its configuration
     """
-    output, retval = runner.run([
+    stdout, stderr, retval = runner.run([
         os.path.join(settings.corosync_binaries, "corosync-cfgtool"),
         "-R"
     ])
-    if retval != 0 or "invalid option" in output:
-        raise LibraryError(
-            reports.corosync_config_reload_error(output.rstrip())
-        )
+    message = join_multilines([stderr, stdout])
+    if retval != 0 or "invalid option" in message:
+        raise LibraryError(reports.corosync_config_reload_error(message))
 
 def get_quorum_status_text(runner):
     """
     Get runtime quorum status from the local node
     """
-    output, retval = runner.run([
+    stdout, stderr, retval = runner.run([
         os.path.join(settings.corosync_binaries, "corosync-quorumtool"),
         "-p"
     ])
     # retval is 0 on success if node is not in partition with quorum
     # retval is 1 on error OR on success if node has quorum
-    if retval not in [0, 1]:
-        raise LibraryError(
-            reports.corosync_quorum_get_status_error(output)
-        )
-    return output
+    if retval not in [0, 1] or stderr.strip():
+        raise LibraryError(reports.corosync_quorum_get_status_error(stderr))
+    return stdout
 
 def set_expected_votes(runner, votes):
     """
     set expected votes in live cluster to specified value
     """
-    output, retval = runner.run([
+    stdout, stderr, retval = runner.run([
         os.path.join(settings.corosync_binaries, "corosync-quorumtool"),
         # format votes to handle the case where they are int
         "-e", "{0}".format(votes)
     ])
     if retval != 0:
         raise LibraryError(
-            reports.corosync_quorum_set_expected_votes_error(output)
+            reports.corosync_quorum_set_expected_votes_error(stderr)
         )
-    return output
+    return stdout
diff --git a/pcs/lib/corosync/qdevice_client.py b/pcs/lib/corosync/qdevice_client.py
index 98fbb0e..c9d0095 100644
--- a/pcs/lib/corosync/qdevice_client.py
+++ b/pcs/lib/corosync/qdevice_client.py
@@ -8,6 +8,7 @@ from __future__ import (
 import os.path
 
 from pcs import settings
+from pcs.common.tools import join_multilines
 from pcs.lib import reports
 from pcs.lib.errors import LibraryError
 
@@ -23,12 +24,14 @@ def get_status_text(runner, verbose=False):
     ]
     if verbose:
         cmd.append("-v")
-    output, retval = runner.run(cmd)
+    stdout, stderr, retval = runner.run(cmd)
     if retval != 0:
         raise LibraryError(
-            reports.corosync_quorum_get_status_error(output)
+            reports.corosync_quorum_get_status_error(
+                join_multilines([stderr, stdout])
+            )
         )
-    return output
+    return stdout
 
 def remote_client_enable(reporter, node_communicator, node):
     """
diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py
index 4054592..fa44923 100644
--- a/pcs/lib/corosync/qdevice_net.py
+++ b/pcs/lib/corosync/qdevice_net.py
@@ -15,6 +15,7 @@ import shutil
 import tempfile
 
 from pcs import settings
+from pcs.common.tools import join_multilines
 from pcs.lib import external, reports
 from pcs.lib.errors import LibraryError
 
@@ -34,6 +35,9 @@ __qdevice_certutil = os.path.join(
     "corosync-qdevice-net-certutil"
 )
 
+class QnetdNotRunningException(Exception):
+    pass
+
 def qdevice_setup(runner):
     """
     initialize qdevice on local host
@@ -41,12 +45,15 @@ def qdevice_setup(runner):
     if external.is_dir_nonempty(settings.corosync_qdevice_net_server_certs_dir):
         raise LibraryError(reports.qdevice_already_initialized(__model))
 
-    output, retval = runner.run([
+    stdout, stderr, retval = runner.run([
         __qnetd_certutil, "-i"
     ])
     if retval != 0:
         raise LibraryError(
-            reports.qdevice_initialization_error(__model, output.rstrip())
+            reports.qdevice_initialization_error(
+                __model,
+                join_multilines([stderr, stdout])
+            )
         )
 
 def qdevice_initialized():
@@ -75,13 +82,18 @@ def qdevice_status_generic_text(runner, verbose=False):
     get qdevice runtime status in plain text
     bool verbose get more detailed output
     """
-    cmd = [__qnetd_tool, "-s"]
+    args = ["-s"]
     if verbose:
-        cmd.append("-v")
-    output, retval = runner.run(cmd)
+        args.append("-v")
+    stdout, stderr, retval = _qdevice_run_tool(runner, args)
     if retval != 0:
-        raise LibraryError(reports.qdevice_get_status_error(__model, output))
-    return output
+        raise LibraryError(
+            reports.qdevice_get_status_error(
+                __model,
+                join_multilines([stderr, stdout])
+            )
+        )
+    return stdout
 
 def qdevice_status_cluster_text(runner, cluster=None, verbose=False):
     """
@@ -89,15 +101,44 @@ def qdevice_status_cluster_text(runner, cluster=None, verbose=False):
     bool verbose get more detailed output
     string cluster show information only about specified cluster
     """
-    cmd = [__qnetd_tool, "-l"]
+    args = ["-l"]
     if verbose:
-        cmd.append("-v")
+        args.append("-v")
     if cluster:
-        cmd.extend(["-c", cluster])
-    output, retval = runner.run(cmd)
+        args.extend(["-c", cluster])
+    stdout, stderr, retval = _qdevice_run_tool(runner, args)
     if retval != 0:
-        raise LibraryError(reports.qdevice_get_status_error(__model, output))
-    return output
+        raise LibraryError(
+            reports.qdevice_get_status_error(
+                __model,
+                join_multilines([stderr, stdout])
+            )
+        )
+    return stdout
+
+def qdevice_connected_clusters(status_cluster_text):
+    """
+    parse qnetd cluster status listing and return connected clusters' names
+    string status_cluster_text output of corosync-qnetd-tool -l
+    """
+    connected_clusters = []
+    regexp = re.compile(r'^Cluster "(?P<cluster>[^"]+)":$')
+    for line in status_cluster_text.splitlines():
+        match = regexp.search(line)
+        if match:
+            connected_clusters.append(match.group("cluster"))
+    return connected_clusters
+
+def _qdevice_run_tool(runner, args):
+    """
+    run corosync-qnetd-tool, raise QnetdNotRunningException if qnetd not running
+    CommandRunner runner
+    iterable args corosync-qnetd-tool arguments
+    """
+    stdout, stderr, retval = runner.run([__qnetd_tool] + args)
+    if retval == 3 and "is qnetd running?" in stderr.lower():
+        raise QnetdNotRunningException()
+    return stdout, stderr, retval
 
 def qdevice_enable(runner):
     """
@@ -143,17 +184,19 @@ def qdevice_sign_certificate_request(runner, cert_request, cluster_name):
         reports.qdevice_certificate_sign_error
     )
     # sign the request
-    output, retval = runner.run([
+    stdout, stderr, retval = runner.run([
         __qnetd_certutil, "-s", "-c", tmpfile.name, "-n", cluster_name
     ])
     tmpfile.close() # temp file is deleted on close
     if retval != 0:
         raise LibraryError(
-            reports.qdevice_certificate_sign_error(output.strip())
+            reports.qdevice_certificate_sign_error(
+                join_multilines([stderr, stdout])
+            )
         )
     # get signed certificate, corosync tool only works with files
     return _get_output_certificate(
-        output,
+        stdout,
         reports.qdevice_certificate_sign_error
     )
 
@@ -181,12 +224,15 @@ def client_setup(runner, ca_certificate):
             reports.qdevice_initialization_error(__model, e.strerror)
         )
     # initialize client's certificate storage
-    output, retval = runner.run([
+    stdout, stderr, retval = runner.run([
         __qdevice_certutil, "-i", "-c", ca_file_path
     ])
     if retval != 0:
         raise LibraryError(
-            reports.qdevice_initialization_error(__model, output.rstrip())
+            reports.qdevice_initialization_error(
+                __model,
+                join_multilines([stderr, stdout])
+            )
         )
 
 def client_initialized():
@@ -217,15 +263,18 @@ def client_generate_certificate_request(runner, cluster_name):
     """
     if not client_initialized():
         raise LibraryError(reports.qdevice_not_initialized(__model))
-    output, retval = runner.run([
+    stdout, stderr, retval = runner.run([
         __qdevice_certutil, "-r", "-n", cluster_name
     ])
     if retval != 0:
         raise LibraryError(
-            reports.qdevice_initialization_error(__model, output.rstrip())
+            reports.qdevice_initialization_error(
+                __model,
+                join_multilines([stderr, stdout])
+            )
         )
     return _get_output_certificate(
-        output,
+        stdout,
         functools.partial(reports.qdevice_initialization_error, __model)
     )
 
@@ -243,17 +292,19 @@ def client_cert_request_to_pk12(runner, cert_request):
         reports.qdevice_certificate_import_error
     )
     # transform it
-    output, retval = runner.run([
+    stdout, stderr, retval = runner.run([
         __qdevice_certutil, "-M", "-c", tmpfile.name
     ])
     tmpfile.close() # temp file is deleted on close
     if retval != 0:
         raise LibraryError(
-            reports.qdevice_certificate_import_error(output)
+            reports.qdevice_certificate_import_error(
+                join_multilines([stderr, stdout])
+            )
         )
     # get resulting pk12, corosync tool only works with files
     return _get_output_certificate(
-        output,
+        stdout,
         reports.qdevice_certificate_import_error
     )
 
@@ -268,13 +319,15 @@ def client_import_certificate_and_key(runner, pk12_certificate):
         pk12_certificate,
         reports.qdevice_certificate_import_error
     )
-    output, retval = runner.run([
+    stdout, stderr, retval = runner.run([
         __qdevice_certutil, "-m", "-c", tmpfile.name
     ])
     tmpfile.close() # temp file is deleted on close
     if retval != 0:
         raise LibraryError(
-            reports.qdevice_certificate_import_error(output)
+            reports.qdevice_certificate_import_error(
+                join_multilines([stderr, stdout])
+            )
         )
 
 def remote_qdevice_get_ca_certificate(node_communicator, host):
diff --git a/pcs/lib/env.py b/pcs/lib/env.py
index 24e4252..b139c58 100644
--- a/pcs/lib/env.py
+++ b/pcs/lib/env.py
@@ -5,20 +5,27 @@ from __future__ import (
     unicode_literals,
 )
 
+import os.path
+
 from lxml import etree
 
+from pcs import settings
 from pcs.lib import reports
+from pcs.lib.booth.env import BoothEnv
+from pcs.lib.cib.tools import ensure_cib_version
+from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
+from pcs.lib.corosync.live import (
+    exists_local_corosync_conf,
+    get_local_corosync_conf,
+    reload_config as reload_corosync_config,
+)
 from pcs.lib.external import (
     is_cman_cluster,
     is_service_running,
     CommandRunner,
     NodeCommunicator,
 )
-from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
-from pcs.lib.corosync.live import (
-    get_local_corosync_conf,
-    reload_config as reload_corosync_config,
-)
+from pcs.lib.errors import LibraryError
 from pcs.lib.nodes_task import (
     distribute_corosync_conf,
     check_corosync_offline_on_nodes,
@@ -29,7 +36,6 @@ from pcs.lib.pacemaker import (
     get_cib_xml,
     replace_cib_configuration_xml,
 )
-from pcs.lib.cib.tools import ensure_cib_version
 
 
 class LibraryEnvironment(object):
@@ -43,6 +49,7 @@ class LibraryEnvironment(object):
         user_groups=None,
         cib_data=None,
         corosync_conf_data=None,
+        booth=None,
         auth_tokens_getter=None,
     ):
         self._logger = logger
@@ -51,6 +58,9 @@ class LibraryEnvironment(object):
         self._user_groups = [] if user_groups is None else user_groups
         self._cib_data = cib_data
         self._corosync_conf_data = corosync_conf_data
+        self._booth = (
+            BoothEnv(report_processor, booth) if booth is not None else None
+        )
         self._is_cman_cluster = None
         # TODO tokens probably should not be inserted from outside, but we're
         # postponing dealing with them, because it's not that easy to move
@@ -169,6 +179,24 @@ class LibraryEnvironment(object):
         else:
             self._corosync_conf_data = corosync_conf_data
 
+    def is_node_in_cluster(self):
+        if self.is_cman_cluster:
+            #TODO --cluster_conf is not propagated here, so no live check is
+            #needed here. But this should not be permanent.
+            return os.path.exists(settings.corosync_conf_file)
+
+        if not self.is_corosync_conf_live:
+            raise AssertionError(
+                "Cannot check if node is in cluster with mocked corosync_conf."
+            )
+        return exists_local_corosync_conf()
+
+    def command_expect_live_corosync_env(self):
+        if not self.is_corosync_conf_live:
+            raise LibraryError(reports.live_environment_required([
+                "--corosync_conf"
+            ]))
+
     @property
     def is_corosync_conf_live(self):
         return self._corosync_conf_data is None
@@ -195,3 +223,7 @@ class LibraryEnvironment(object):
             else:
                 self._auth_tokens = {}
         return self._auth_tokens
+
+    @property
+    def booth(self):
+        return self._booth
diff --git a/pcs/lib/env_file.py b/pcs/lib/env_file.py
new file mode 100644
index 0000000..e683a57
--- /dev/null
+++ b/pcs/lib/env_file.py
@@ -0,0 +1,122 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os.path
+
+from pcs.common import report_codes
+from pcs.common.tools import format_environment_error
+from pcs.lib import reports
+from pcs.lib.errors import ReportItemSeverity, LibraryError, LibraryEnvError
+
+
+class GhostFile(object):
+    is_live = False
+    def __init__(self, file_role, content=None):
+        self.__file_role = file_role
+        self.__content = content
+        self.__no_existing_file_expected = False
+        self.__can_overwrite_existing_file = False
+        self.__is_binary = False
+
+    def read(self):
+        if self.__content is None:
+            raise LibraryEnvError(
+                reports.file_does_not_exist(self.__file_role)
+            )
+
+        return self.__content
+
+    def remove(self, silence_no_existence):
+        raise AssertionError("Remove GhostFile is not supported.")
+
+    def write(self, content, file_operation=None, is_binary=False):
+        """
+        callable file_operation is there only for RealFile compatible interface
+            it has no effect
+        """
+        self.__is_binary = is_binary
+        self.__content = content
+
+    def assert_no_conflict_with_existing(
+        self, report_processor, can_overwrite_existing=False
+    ):
+        self.__no_existing_file_expected = True
+        self.__can_overwrite_existing_file = can_overwrite_existing
+
+    def export(self):
+        return {
+            "content": self.__content,
+            "no_existing_file_expected": self.__no_existing_file_expected,
+            "can_overwrite_existing_file": self.__can_overwrite_existing_file,
+            "is_binary": self.__is_binary,
+        }
+
+
+class RealFile(object):
+    is_live = True
+    def __init__(
+        self, file_role, file_path,
+        overwrite_code=report_codes.FORCE_FILE_OVERWRITE
+    ):
+        self.__file_role = file_role
+        self.__file_path = file_path
+        self.__overwrite_code = overwrite_code
+
+    def assert_no_conflict_with_existing(
+        self, report_processor, can_overwrite_existing=False
+    ):
+        if os.path.exists(self.__file_path):
+            report_processor.process(reports.file_already_exists(
+                self.__file_role,
+                self.__file_path,
+                ReportItemSeverity.WARNING if can_overwrite_existing
+                    else ReportItemSeverity.ERROR,
+                forceable=None if can_overwrite_existing
+                    else self.__overwrite_code,
+            ))
+
+    def write(self, content, file_operation=None, is_binary=False):
+        """
+        callable file_operation takes a path and performs an operation on it, e.g. chmod
+        """
+        mode = "wb" if is_binary else "w"
+        try:
+            with open(self.__file_path, mode) as config_file:
+                config_file.write(content)
+            if file_operation:
+                file_operation(self.__file_path)
+        except EnvironmentError as e:
+            raise self.__report_io_error(e, "write")
+
+    def read(self):
+        try:
+            with open(self.__file_path, "r") as file:
+                return file.read()
+        except EnvironmentError as e:
+            raise self.__report_io_error(e, "read")
+
+    def remove(self, silence_no_existence=False):
+        if os.path.exists(self.__file_path):
+            try:
+                os.remove(self.__file_path)
+            except EnvironmentError as e:
+                raise self.__report_io_error(e, "remove")
+        elif not silence_no_existence:
+            raise LibraryError(reports.file_io_error(
+                self.__file_role,
+                file_path=self.__file_path,
+                operation="remove",
+                reason="File does not exist"
+            ))
+
+    def __report_io_error(self, e, operation):
+        return LibraryError(reports.file_io_error(
+            self.__file_role,
+            file_path=self.__file_path,
+            operation=operation,
+            reason=format_environment_error(e)
+        ))
diff --git a/pcs/lib/errors.py b/pcs/lib/errors.py
index 9cab5e9..0a8f4fa 100644
--- a/pcs/lib/errors.py
+++ b/pcs/lib/errors.py
@@ -8,6 +8,20 @@ from __future__ import (
 class LibraryError(Exception):
     pass
 
+class LibraryEnvError(LibraryError):
+    def __init__(self, *args, **kwargs):
+        super(LibraryEnvError, self).__init__(*args, **kwargs)
+        self.processed = []
+
+    def sign_processed(self, report):
+        self.processed.append(report)
+
+    @property
+    def unprocessed(self):
+        return [report for report in self.args if report not in self.processed]
+
+
+
 class ReportItemSeverity(object):
     ERROR = 'ERROR'
     WARNING = 'WARNING'
diff --git a/pcs/lib/external.py b/pcs/lib/external.py
index c773e5a..074d2aa 100644
--- a/pcs/lib/external.py
+++ b/pcs/lib/external.py
@@ -47,21 +47,23 @@ except ImportError:
         URLError as urllib_URLError
     )
 
-from pcs.lib import reports
-from pcs.lib.errors import LibraryError, ReportItemSeverity
+from pcs import settings
 from pcs.common import report_codes
 from pcs.common.tools import (
+    join_multilines,
     simple_cache,
     run_parallel as tools_run_parallel,
 )
-from pcs import settings
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError, ReportItemSeverity
 
 
 class ManageServiceError(Exception):
     #pylint: disable=super-init-not-called
-    def __init__(self, service, message=None):
+    def __init__(self, service, message=None, instance=None):
         self.service = service
         self.message = message
+        self.instance = instance
 
 class DisableServiceError(ManageServiceError):
     pass
@@ -91,6 +93,22 @@ def is_dir_nonempty(path):
     return len(os.listdir(path)) > 0
 
 
+def _get_service_name(service, instance=None):
+    return "{0}{1}.service".format(
+        service, "" if instance is None else "@{0}".format(instance)
+    )
+
+def ensure_is_systemd():
+    """
+    Ensure the current system is a systemd system. Raises LibraryError if not.
+    """
+    if not is_systemctl():
+        raise LibraryError(
+            reports.unsupported_operation_on_non_systemd_systems()
+        )
+
+
+
 @simple_cache
 def is_systemctl():
     """
@@ -108,74 +126,98 @@ def is_systemctl():
     return False
 
 
-def disable_service(runner, service):
+def disable_service(runner, service, instance=None):
     """
     Disable specified service in local system.
     Raise DisableServiceError or LibraryError on failure.
 
     runner -- CommandRunner
     service -- name of service
+    instance -- instance name; it has no effect on non-systemd systems.
+        If None no instance name will be used.
     """
+    if not is_service_installed(runner, service):
+        return
     if is_systemctl():
-        output, retval = runner.run([
-            "systemctl", "disable", service + ".service"
+        stdout, stderr, retval = runner.run([
+            "systemctl", "disable", _get_service_name(service, instance)
         ])
     else:
-        if not is_service_installed(runner, service):
-            return
-        output, retval = runner.run(["chkconfig", service, "off"])
+        stdout, stderr, retval = runner.run(["chkconfig", service, "off"])
     if retval != 0:
-        raise DisableServiceError(service, output.rstrip())
+        raise DisableServiceError(
+            service,
+            join_multilines([stderr, stdout]),
+            instance
+        )
 
 
-def enable_service(runner, service):
+def enable_service(runner, service, instance=None):
     """
     Enable specified service in local system.
     Raise EnableServiceError or LibraryError on failure.
 
     runner -- CommandRunner
     service -- name of service
+    instance -- instance name; it has no effect on non-systemd systems.
+        If None no instance name will be used.
     """
     if is_systemctl():
-        output, retval = runner.run([
-            "systemctl", "enable", service + ".service"
+        stdout, stderr, retval = runner.run([
+            "systemctl", "enable", _get_service_name(service, instance)
         ])
     else:
-        output, retval = runner.run(["chkconfig", service, "on"])
+        stdout, stderr, retval = runner.run(["chkconfig", service, "on"])
     if retval != 0:
-        raise EnableServiceError(service, output.rstrip())
+        raise EnableServiceError(
+            service,
+            join_multilines([stderr, stdout]),
+            instance
+        )
 
 
-def start_service(runner, service):
+def start_service(runner, service, instance=None):
     """
     Start specified service in local system
     CommandRunner runner
     string service service name
+    string instance instance name; it has no effect on non-systemd systems.
+        If None no instance name will be used.
     """
     if is_systemctl():
-        output, retval = runner.run([
-            "systemctl", "start", "{0}.service".format(service)
+        stdout, stderr, retval = runner.run([
+            "systemctl", "start", _get_service_name(service, instance)
         ])
     else:
-        output, retval = runner.run(["service", service, "start"])
+        stdout, stderr, retval = runner.run(["service", service, "start"])
     if retval != 0:
-        raise StartServiceError(service, output.rstrip())
+        raise StartServiceError(
+            service,
+            join_multilines([stderr, stdout]),
+            instance
+        )
 
 
-def stop_service(runner, service):
+def stop_service(runner, service, instance=None):
     """
     Stop specified service in local system
     CommandRunner runner
     string service service name
+    string instance instance name; it has no effect on non-systemd systems.
+        If None no instance name will be used.
     """
     if is_systemctl():
-        output, retval = runner.run([
-            "systemctl", "stop", "{0}.service".format(service)
+        stdout, stderr, retval = runner.run([
+            "systemctl", "stop", _get_service_name(service, instance)
         ])
     else:
-        output, retval = runner.run(["service", service, "stop"])
+        stdout, stderr, retval = runner.run(["service", service, "stop"])
     if retval != 0:
-        raise StopServiceError(service, output.rstrip())
+        raise StopServiceError(
+            service,
+            join_multilines([stderr, stdout]),
+            instance
+        )
 
 
 def kill_services(runner, services):
@@ -185,18 +227,19 @@ def kill_services(runner, services):
     iterable services service names
     """
     # make killall not report that a process is not running
-    output, retval = runner.run(
+    stdout, stderr, retval = runner.run(
         ["killall", "--quiet", "--signal", "9", "--"] + list(services)
     )
     # If a process isn't running, killall will still return 1 even with --quiet.
     # We don't consider that an error, so we check for output string as well.
     # If it's empty, no actuall error happened.
     if retval != 0:
-        if output.strip():
-            raise KillServicesError(list(services), output.rstrip())
+        message = join_multilines([stderr, stdout])
+        if message:
+            raise KillServicesError(list(services), message)
 
 
-def is_service_enabled(runner, service):
+def is_service_enabled(runner, service, instance=None):
     """
     Check if specified service is enabled in local system.
 
@@ -204,16 +247,16 @@ def is_service_enabled(runner, service):
     service -- name of service
     """
     if is_systemctl():
-        _, retval = runner.run(
-            ["systemctl", "is-enabled", service + ".service"]
+        dummy_stdout, dummy_stderr, retval = runner.run(
+            ["systemctl", "is-enabled", _get_service_name(service, instance)]
         )
     else:
-        _, retval = runner.run(["chkconfig", service])
+        dummy_stdout, dummy_stderr, retval = runner.run(["chkconfig", service])
 
     return retval == 0
 
 
-def is_service_running(runner, service):
+def is_service_running(runner, service, instance=None):
     """
     Check if specified service is currently running on local system.
 
@@ -221,9 +264,15 @@ def is_service_running(runner, service):
     service -- name of service
     """
     if is_systemctl():
-        _, retval = runner.run(["systemctl", "is-active", service + ".service"])
+        dummy_stdout, dummy_stderr, retval = runner.run([
+            "systemctl",
+            "is-active",
+            _get_service_name(service, instance)
+        ])
     else:
-        _, retval = runner.run(["service", service, "status"])
+        dummy_stdout, dummy_stderr, retval = runner.run(
+            ["service", service, "status"]
+        )
 
     return retval == 0
 
@@ -250,12 +299,12 @@ def get_non_systemd_services(runner):
     if is_systemctl():
         return []
 
-    output, return_code = runner.run(["chkconfig"], ignore_stderr=True)
+    stdout, dummy_stderr, return_code = runner.run(["chkconfig"])
     if return_code != 0:
         return []
 
     service_list = []
-    for service in output.splitlines():
+    for service in stdout.splitlines():
         service = service.split(" ", 1)[0]
         if service:
             service_list.append(service)
@@ -271,12 +320,14 @@ def get_systemd_services(runner):
     if not is_systemctl():
         return []
 
-    output, return_code = runner.run(["systemctl", "list-unit-files", "--full"])
+    stdout, dummy_stderr, return_code = runner.run([
+        "systemctl", "list-unit-files", "--full"
+    ])
     if return_code != 0:
         return []
 
     service_list = []
-    for service in output.splitlines():
+    for service in stdout.splitlines():
         match = re.search(r'^([\S]*)\.service', service)
         if match:
             service_list.append(match.group(1))
@@ -293,13 +344,13 @@ def is_cman_cluster(runner):
     # - corosync1 runs with cman on rhel6
     # - corosync1 can be used without cman, but we don't support it anyways
     # - corosync2 is the default result if errors occur
-    output, retval = runner.run([
+    stdout, dummy_stderr, retval = runner.run([
         os.path.join(settings.corosync_binaries, "corosync"),
         "-v"
     ])
     if retval != 0:
         return False
-    match = re.search(r"version\D+(\d+)", output)
+    match = re.search(r"version\D+(\d+)", stdout)
     return match is not None and match.group(1) == "1"
 
 
@@ -311,9 +362,11 @@ class CommandRunner(object):
         self._python2 = sys.version[0] == "2"
 
     def run(
-        self, args, ignore_stderr=False, stdin_string=None, env_extend=None,
-        binary_output=False
+        self, args, stdin_string=None, env_extend=None, binary_output=False
     ):
+        #Resetting the environment variables with an empty dict is deliberate
+        #here. We need to get rid of defaults - we do not know the context and
+        #environment of the library. So executables must be given by full path.
         env_vars = dict(env_extend) if env_extend else dict()
         env_vars.update(self._env_vars)
 
@@ -332,9 +385,7 @@ class CommandRunner(object):
                 # Some commands react differently if they get anything via stdin
                 stdin=(subprocess.PIPE if stdin_string is not None else None),
                 stdout=subprocess.PIPE,
-                stderr=(
-                    subprocess.PIPE if ignore_stderr else subprocess.STDOUT
-                ),
+                stderr=subprocess.PIPE,
                 preexec_fn=(
                     lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
                 ),
@@ -344,7 +395,7 @@ class CommandRunner(object):
                 # decodes newlines and in python3 also converts bytes to str
                 universal_newlines=(not self._python2 and not binary_output)
             )
-            output, dummy_stderror = process.communicate(stdin_string)
+            out_std, out_err = process.communicate(stdin_string)
             retval = process.returncode
         except OSError as e:
             raise LibraryError(
@@ -354,13 +405,19 @@ class CommandRunner(object):
         self._logger.debug(
             (
                 "Finished running: {args}\nReturn value: {retval}"
-                + "\n--Debug Output Start--\n{output}\n--Debug Output End--"
-            ).format(args=log_args, retval=retval, output=output)
-        )
-        self._reporter.process(
-            reports.run_external_process_finished(log_args, retval, output)
+                + "\n--Debug Stdout Start--\n{out_std}\n--Debug Stdout End--"
+                + "\n--Debug Stderr Start--\n{out_err}\n--Debug Stderr End--"
+            ).format(
+                args=log_args,
+                retval=retval,
+                out_std=out_std,
+                out_err=out_err
+            )
         )
-        return output, retval
+        self._reporter.process(reports.run_external_process_finished(
+            log_args, retval, out_std, out_err
+        ))
+        return out_std, out_err, retval
 
 
 class NodeCommunicationException(Exception):
diff --git a/pcs/lib/pacemaker.py b/pcs/lib/pacemaker.py
index fd6f97b..6747b22 100644
--- a/pcs/lib/pacemaker.py
+++ b/pcs/lib/pacemaker.py
@@ -9,6 +9,7 @@ import os.path
 from lxml import etree
 
 from pcs import settings
+from pcs.common.tools import join_multilines
 from pcs.lib import reports
 from pcs.lib.errors import LibraryError
 from pcs.lib.pacemaker_state import ClusterState
@@ -26,28 +27,33 @@ def __exec(name):
     return os.path.join(settings.pacemaker_binaries, name)
 
 def get_cluster_status_xml(runner):
-    output, retval = runner.run(
+    stdout, stderr, retval = runner.run(
         [__exec("crm_mon"), "--one-shot", "--as-xml", "--inactive"]
     )
     if retval != 0:
         raise CrmMonErrorException(
-            reports.cluster_state_cannot_load(retval, output)
+            reports.cluster_state_cannot_load(join_multilines([stderr, stdout]))
         )
-    return output
+    return stdout
 
 def get_cib_xml(runner, scope=None):
     command = [__exec("cibadmin"), "--local", "--query"]
     if scope:
         command.append("--scope={0}".format(scope))
-    output, retval = runner.run(command)
+    stdout, stderr, retval = runner.run(command)
     if retval != 0:
         if retval == __EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT and scope:
             raise LibraryError(
-                reports.cib_load_error_scope_missing(scope, retval, output)
+                reports.cib_load_error_scope_missing(
+                    scope,
+                    join_multilines([stderr, stdout])
+                )
             )
         else:
-            raise LibraryError(reports.cib_load_error(retval, output))
-    return output
+            raise LibraryError(
+                reports.cib_load_error(join_multilines([stderr, stdout]))
+            )
+    return stdout
 
 def get_cib(xml):
     try:
@@ -59,9 +65,9 @@ def replace_cib_configuration_xml(runner, xml, cib_upgraded=False):
     cmd = [__exec("cibadmin"), "--replace",  "--verbose", "--xml-pipe"]
     if not cib_upgraded:
         cmd += ["--scope", "configuration"]
-    output, retval = runner.run(cmd, stdin_string=xml)
+    stdout, stderr, retval = runner.run(cmd, stdin_string=xml)
     if retval != 0:
-        raise LibraryError(reports.cib_push_error(retval, output))
+        raise LibraryError(reports.cib_push_error(stderr, stdout))
 
 def replace_cib_configuration(runner, tree, cib_upgraded=False):
     #etree returns bytes: b'xml'
@@ -108,13 +114,18 @@ def resource_cleanup(runner, resource=None, node=None, force=False):
     if node:
         cmd.extend(["--node", node])
 
-    output, retval = runner.run(cmd)
+    stdout, stderr, retval = runner.run(cmd)
 
     if retval != 0:
         raise LibraryError(
-            reports.resource_cleanup_error(retval, output, resource, node)
+            reports.resource_cleanup_error(
+                join_multilines([stderr, stdout]),
+                resource,
+                node
+            )
         )
-    return output
+    # useful output (what has been done) goes to stderr
+    return join_multilines([stdout, stderr])
 
 def nodes_standby(runner, node_list=None, all_nodes=False):
     return __nodes_standby_unstandby(runner, True, node_list, all_nodes)
@@ -124,8 +135,11 @@ def nodes_unstandby(runner, node_list=None, all_nodes=False):
 
 def has_resource_wait_support(runner):
     # returns 1 on success so we don't care about retval
-    output, dummy_retval = runner.run([__exec("crm_resource"), "-?"])
-    return "--wait" in output
+    stdout, stderr, dummy_retval = runner.run(
+        [__exec("crm_resource"), "-?"]
+    )
+    # help goes to stderr but we check stdout as well in case that gets changed
+    return "--wait" in stderr or "--wait" in stdout
 
 def ensure_resource_wait_support(runner):
     if not has_resource_wait_support(runner):
@@ -135,15 +149,22 @@ def wait_for_resources(runner, timeout=None):
     args = [__exec("crm_resource"), "--wait"]
     if timeout is not None:
         args.append("--timeout={0}".format(timeout))
-    output, retval = runner.run(args)
+    stdout, stderr, retval = runner.run(args)
     if retval != 0:
+        # Useful info goes to stderr - not only error messages, a list of
+        # pending actions in case of timeout goes there as well.
+        # We check stdout as well in case that gets changed.
         if retval == __EXITCODE_WAIT_TIMEOUT:
             raise LibraryError(
-                reports.resource_wait_timed_out(retval, output.strip())
+                reports.resource_wait_timed_out(
+                    join_multilines([stderr, stdout])
+                )
             )
         else:
             raise LibraryError(
-                reports.resource_wait_error(retval, output.strip())
+                reports.resource_wait_error(
+                    join_multilines([stderr, stdout])
+                )
             )
 
 def __nodes_standby_unstandby(
@@ -178,9 +199,11 @@ def __nodes_standby_unstandby(
         cmd_list.append(cmd_template)
     report = []
     for cmd in cmd_list:
-        output, retval = runner.run(cmd)
+        stdout, stderr, retval = runner.run(cmd)
         if retval != 0:
-            report.append(reports.common_error(output))
+            report.append(
+                reports.common_error(join_multilines([stderr, stdout]))
+            )
     if report:
         raise LibraryError(*report)
 
@@ -189,21 +212,23 @@ def __get_local_node_name(runner):
     # but it returns false names when cluster is not running (or we are on
     # a remote node). Getting node id first is reliable since it fails in those
     # cases.
-    output, retval = runner.run([__exec("crm_node"), "--cluster-id"])
+    stdout, dummy_stderr, retval = runner.run(
+        [__exec("crm_node"), "--cluster-id"]
+    )
     if retval != 0:
         raise LibraryError(
             reports.pacemaker_local_node_name_not_found("node id not found")
         )
-    node_id = output.strip()
+    node_id = stdout.strip()
 
-    output, retval = runner.run(
+    stdout, dummy_stderr, retval = runner.run(
         [__exec("crm_node"), "--name-for-id={0}".format(node_id)]
     )
     if retval != 0:
         raise LibraryError(
             reports.pacemaker_local_node_name_not_found("node name not found")
         )
-    node_name = output.strip()
+    node_name = stdout.strip()
 
     if node_name == "(null)":
         raise LibraryError(
diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
index 9ececf9..cff491c 100644
--- a/pcs/lib/reports.py
+++ b/pcs/lib/reports.py
@@ -262,21 +262,24 @@ def run_external_process_started(command, stdin):
         }
     )
 
-def run_external_process_finished(command, retval, stdout):
+def run_external_process_finished(command, retval, stdout, stderr):
     """
     information about result of running an external process
     command string the external process command
     retval external process's return (exit) code
     stdout string external process's stdout
+    stderr string external process's stderr
     """
     return ReportItem.debug(
         report_codes.RUN_EXTERNAL_PROCESS_FINISHED,
         "Finished running: {command}\nReturn value: {return_value}"
-        + "\n--Debug Output Start--\n{stdout}\n--Debug Output End--\n",
+        + "\n--Debug Stdout Start--\n{stdout}\n--Debug Stdout End--"
+        + "\n--Debug Stderr Start--\n{stderr}\n--Debug Stderr End--\n",
         info={
             "command": command,
             "return_value": retval,
             "stdout": stdout,
+            "stderr": stderr,
         }
     )
 
@@ -839,6 +842,19 @@ def qdevice_destroy_error(model, reason):
         }
     )
 
+def qdevice_not_running(model):
+    """
+    qdevice is expected to be running but is not running
+    string model qdevice model
+    """
+    return ReportItem.error(
+        report_codes.QDEVICE_NOT_RUNNING,
+        "Quorum device '{model}' is not running",
+        info={
+            "model": model,
+        }
+    )
+
 def qdevice_get_status_error(model, reason):
     """
     unable to get runtime status of qdevice
@@ -854,6 +870,23 @@ def qdevice_get_status_error(model, reason):
         }
     )
 
+def qdevice_used_by_clusters(
+    clusters, severity=ReportItemSeverity.ERROR, forceable=None
+):
+    """
+    Qdevice is currently being used by clusters, cannot stop it unless forced
+    """
+    return ReportItem(
+        report_codes.QDEVICE_USED_BY_CLUSTERS,
+        severity,
+        "Quorum device is currently being used by cluster(s): {clusters_str}",
+        info={
+            "clusters": clusters,
+            "clusters_str": ", ".join(clusters),
+        },
+        forceable=forceable
+    )
+
 def cman_unsupported_command():
     """
     requested library command is not available as local cluster is CMAN based
@@ -903,35 +936,31 @@ def resource_does_not_exist(resource_id):
         }
     )
 
-def cib_load_error(retval, stdout):
+def cib_load_error(reason):
     """
     cannot load cib from cibadmin, cibadmin exited with non-zero code
-    retval external process's return (exit) code
-    stdout string external process's stdout
+    string reason error description
     """
     return ReportItem.error(
         report_codes.CIB_LOAD_ERROR,
         "unable to get cib",
         info={
-            "return_value": retval,
-            "stdout": stdout,
+            "reason": reason,
         }
     )
 
-def cib_load_error_scope_missing(scope, retval, stdout):
+def cib_load_error_scope_missing(scope, reason):
     """
     cannot load cib from cibadmin, specified scope is missing in the cib
     scope string requested cib scope
-    retval external process's return (exit) code
-    stdout string external process's stdout
+    string reason error description
     """
     return ReportItem.error(
         report_codes.CIB_LOAD_ERROR_SCOPE_MISSING,
         "unable to get cib, scope '{scope}' not present in cib",
         info={
             "scope": scope,
-            "return_value": retval,
-            "stdout": stdout,
+            "reason": reason,
         }
     )
 
@@ -957,33 +986,31 @@ def cib_missing_mandatory_section(section_name):
         }
     )
 
-def cib_push_error(retval, stdout):
+def cib_push_error(reason, pushed_cib):
     """
     cannot push cib to cibadmin, cibadmin exited with non-zero code
-    retval external process's return (exit) code
-    stdout string external process's stdout
+    string reason error description
+    string pushed_cib cib which failed to be pushed
     """
     return ReportItem.error(
         report_codes.CIB_PUSH_ERROR,
-        "Unable to update cib\n{stdout}",
+        "Unable to update cib\n{reason}\n{pushed_cib}",
         info={
-            "return_value": retval,
-            "stdout": stdout,
+            "reason": reason,
+            "pushed_cib": pushed_cib,
         }
     )
 
-def cluster_state_cannot_load(retval, stdout):
+def cluster_state_cannot_load(reason):
     """
     cannot load cluster status from crm_mon, crm_mon exited with non-zero code
-    retval external process's return (exit) code
-    stdout string external process's stdout
+    string reason error description
     """
     return ReportItem.error(
         report_codes.CRM_MON_ERROR,
         "error running crm_mon, is pacemaker running?",
         info={
-            "return_value": retval,
-            "stdout": stdout,
+            "reason": reason,
         }
     )
 
@@ -1005,57 +1032,50 @@ def resource_wait_not_supported():
         "crm_resource does not support --wait, please upgrade pacemaker"
     )
 
-def resource_wait_timed_out(retval, stdout):
+def resource_wait_timed_out(reason):
     """
     waiting for resources (crm_resource --wait) failed, timeout expired
-    retval external process's return (exit) code
-    stdout string external process's stdout
+    string reason error description
     """
     return ReportItem.error(
         report_codes.RESOURCE_WAIT_TIMED_OUT,
-        "waiting timeout\n\n{stdout}",
+        "waiting timeout\n\n{reason}",
         info={
-            "return_value": retval,
-            "stdout": stdout,
+            "reason": reason,
         }
     )
 
-def resource_wait_error(retval, stdout):
+def resource_wait_error(reason):
     """
     waiting for resources (crm_resource --wait) failed
-    retval external process's return (exit) code
-    stdout string external process's stdout
+    string reason error description
     """
     return ReportItem.error(
         report_codes.RESOURCE_WAIT_ERROR,
-        "{stdout}",
+        "{reason}",
         info={
-            "return_value": retval,
-            "stdout": stdout,
+            "reason": reason,
         }
     )
 
-def resource_cleanup_error(retval, stdout, resource=None, node=None):
+def resource_cleanup_error(reason, resource=None, node=None):
     """
     an error occured when deleting resource history in pacemaker
-    retval external process's return (exit) code
-    stdout string external process's stdout
-    resource string resource which has been cleaned up
-    node string node which has been cleaned up
+    string reason error description
+    string resource resource which has been cleaned up
+    string node node which has been cleaned up
     """
     if resource:
-        text = "Unable to cleanup resource: {resource}\n{stdout}"
+        text = "Unable to cleanup resource: {resource}\n{reason}"
     else:
         text = (
-            "Unexpected error occured. 'crm_resource -C' err_code: "
-            + "{return_value}\n{stdout}"
+            "Unexpected error occured. 'crm_resource -C' error:\n{reason}"
         )
     return ReportItem.error(
         report_codes.RESOURCE_CLEANUP_ERROR,
         text,
         info={
-            "return_value": retval,
-            "stdout": stdout,
+            "reason": reason,
             "resource": resource,
             "node": node,
         }
@@ -1153,27 +1173,37 @@ def cman_broadcast_all_rings():
             + "broadcast in only one ring"
     )
 
-def service_start_started(service):
+def service_start_started(service, instance=None):
     """
     system service is being started
     string service service name or description
+    string instance instance of service
     """
+    if instance:
+        msg = "Starting {service}@{instance}..."
+    else:
+        msg = "Starting {service}..."
     return ReportItem.info(
         report_codes.SERVICE_START_STARTED,
-        "Starting {service}...",
+        msg,
         info={
             "service": service,
+            "instance": instance,
         }
     )
 
-def service_start_error(service, reason, node=None):
+def service_start_error(service, reason, node=None, instance=None):
     """
     system service start failed
     string service service name or description
     string reason error message
     string node node on which service has been requested to start
+    string instance instance of service
     """
-    msg = "Unable to start {service}: {reason}"
+    if instance:
+        msg = "Unable to start {service}@{instance}: {reason}"
+    else:
+        msg = "Unable to start {service}: {reason}"
     return ReportItem.error(
         report_codes.SERVICE_START_ERROR,
         msg if node is None else "{node}: " + msg,
@@ -1181,33 +1211,43 @@ def service_start_error(service, reason, node=None):
             "service": service,
             "reason": reason,
             "node": node,
+            "instance": instance,
         }
     )
 
-def service_start_success(service, node=None):
+def service_start_success(service, node=None, instance=None):
     """
     system service was started successfully
     string service service name or description
     string node node on which service has been requested to start
+    string instance instance of service
     """
-    msg = "{service} started"
+    if instance:
+        msg = "{service}@{instance} started"
+    else:
+        msg = "{service} started"
     return ReportItem.info(
         report_codes.SERVICE_START_SUCCESS,
         msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "node": node,
+            "instance": instance,
         }
     )
 
-def service_start_skipped(service, reason, node=None):
+def service_start_skipped(service, reason, node=None, instance=None):
     """
     starting system service was skipped, no error occured
     string service service name or description
     string reason why the start has been skipped
     string node node on which service has been requested to start
+    string instance instance of service
     """
-    msg = "not starting {service} - {reason}"
+    if instance:
+        msg = "not starting {service}@{instance} - {reason}"
+    else:
+        msg = "not starting {service} - {reason}"
     return ReportItem.info(
         report_codes.SERVICE_START_SKIPPED,
         msg if node is None else "{node}: " + msg,
@@ -1215,30 +1255,41 @@ def service_start_skipped(service, reason, node=None):
             "service": service,
             "reason": reason,
             "node": node,
+            "instance": instance,
         }
     )
 
-def service_stop_started(service):
+def service_stop_started(service, instance=None):
     """
     system service is being stopped
     string service service name or description
+    string instance instance of service
     """
+    if instance:
+        msg = "Stopping {service}@{instance}..."
+    else:
+        msg = "Stopping {service}..."
     return ReportItem.info(
         report_codes.SERVICE_STOP_STARTED,
-        "Stopping {service}...",
+        msg,
         info={
             "service": service,
+            "instance": instance,
         }
     )
 
-def service_stop_error(service, reason, node=None):
+def service_stop_error(service, reason, node=None, instance=None):
     """
     system service stop failed
     string service service name or description
     string reason error message
     string node node on which service has been requested to stop
+    string instance instance of service
     """
-    msg = "Unable to stop {service}: {reason}"
+    if instance:
+        msg = "Unable to stop {service}@{instance}: {reason}"
+    else:
+        msg = "Unable to stop {service}: {reason}"
     return ReportItem.error(
         report_codes.SERVICE_STOP_ERROR,
         msg if node is None else "{node}: " + msg,
@@ -1246,22 +1297,28 @@ def service_stop_error(service, reason, node=None):
             "service": service,
             "reason": reason,
             "node": node,
+            "instance": instance,
         }
     )
 
-def service_stop_success(service, node=None):
+def service_stop_success(service, node=None, instance=None):
     """
     system service was stopped successfully
     string service service name or description
     string node node on which service has been requested to stop
+    string instance instance of service
     """
-    msg = "{service} stopped"
+    if instance:
+        msg = "{service}@{instance} stopped"
+    else:
+        msg = "{service} stopped"
     return ReportItem.info(
         report_codes.SERVICE_STOP_SUCCESS,
         msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "node": node,
+            "instance": instance,
         }
     )
 
@@ -1295,27 +1352,37 @@ def service_kill_success(services):
         }
     )
 
-def service_enable_started(service):
+def service_enable_started(service, instance=None):
     """
     system service is being enabled
     string service service name or description
+    string instance instance of service
     """
+    if instance:
+        msg = "Enabling {service}@{instance}..."
+    else:
+        msg = "Enabling {service}..."
     return ReportItem.info(
         report_codes.SERVICE_ENABLE_STARTED,
-        "Enabling {service}...",
+        msg,
         info={
             "service": service,
+            "instance": instance,
         }
     )
 
-def service_enable_error(service, reason, node=None):
+def service_enable_error(service, reason, node=None, instance=None):
     """
     system service enable failed
     string service service name or description
     string reason error message
     string node node on which service was enabled
+    string instance instance of service
     """
-    msg = "Unable to enable {service}: {reason}"
+    if instance:
+        msg = "Unable to enable {service}@{instance}: {reason}"
+    else:
+        msg = "Unable to enable {service}: {reason}"
     return ReportItem.error(
         report_codes.SERVICE_ENABLE_ERROR,
         msg if node is None else "{node}: " + msg,
@@ -1323,33 +1390,43 @@ def service_enable_error(service, reason, node=None):
             "service": service,
             "reason": reason,
             "node": node,
+            "instance": instance,
         }
     )
 
-def service_enable_success(service, node=None):
+def service_enable_success(service, node=None, instance=None):
     """
     system service was enabled successfully
     string service service name or description
     string node node on which service has been enabled
+    string instance instance of service
     """
-    msg = "{service} enabled"
+    if instance:
+        msg = "{service}@{instance} enabled"
+    else:
+        msg = "{service} enabled"
     return ReportItem.info(
         report_codes.SERVICE_ENABLE_SUCCESS,
         msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "node": node,
+            "instance": instance,
         }
     )
 
-def service_enable_skipped(service, reason, node=None):
+def service_enable_skipped(service, reason, node=None, instance=None):
     """
     enabling system service was skipped, no error occured
     string service service name or description
     string reason why the enabling has been skipped
     string node node on which service has been requested to enable
+    string instance instance of service
     """
-    msg = "not enabling {service} - {reason}"
+    if instance:
+        msg = "not enabling {service}@{instance} - {reason}"
+    else:
+        msg = "not enabling {service} - {reason}"
     return ReportItem.info(
         report_codes.SERVICE_ENABLE_SKIPPED,
         msg if node is None else "{node}: " + msg,
@@ -1357,30 +1434,41 @@ def service_enable_skipped(service, reason, node=None):
             "service": service,
             "reason": reason,
             "node": node,
+            "instance": instance
         }
     )
 
-def service_disable_started(service):
+def service_disable_started(service, instance=None):
     """
     system service is being disabled
     string service service name or description
+    string instance instance of service
     """
+    if instance:
+        msg = "Disabling {service}@{instance}..."
+    else:
+        msg = "Disabling {service}..."
     return ReportItem.info(
         report_codes.SERVICE_DISABLE_STARTED,
-        "Disabling {service}...",
+        msg,
         info={
             "service": service,
+            "instance": instance,
         }
     )
 
-def service_disable_error(service, reason, node=None):
+def service_disable_error(service, reason, node=None, instance=None):
     """
     system service disable failed
     string service service name or description
     string reason error message
     string node node on which service was disabled
+    string instance instance of service
     """
-    msg = "Unable to disable {service}: {reason}"
+    if instance:
+        msg = "Unable to disable {service}@{instance}: {reason}"
+    else:
+        msg = "Unable to disable {service}: {reason}"
     return ReportItem.error(
         report_codes.SERVICE_DISABLE_ERROR,
         msg if node is None else "{node}: " + msg,
@@ -1388,22 +1476,28 @@ def service_disable_error(service, reason, node=None):
             "service": service,
             "reason": reason,
             "node": node,
+            "instance": instance,
         }
     )
 
-def service_disable_success(service, node=None):
+def service_disable_success(service, node=None, instance=None):
     """
     system service was disabled successfully
     string service service name or description
     string node node on which service was disabled
+    string instance instance of service
     """
-    msg = "{service} disabled"
+    if instance:
+        msg = "{service}@{instance} disabled"
+    else:
+        msg = "{service} disabled"
     return ReportItem.info(
         report_codes.SERVICE_DISABLE_SUCCESS,
         msg if node is None else "{node}: " + msg,
         info={
             "service": service,
             "node": node,
+            "instance": instance,
         }
     )
 
@@ -1627,6 +1721,19 @@ def watchdog_not_found(node, watchdog):
     )
 
 
+def invalid_watchdog_path(watchdog):
+    """
+    watchdog path is not an absolute path
+
+    watchdog -- watchdog device path
+    """
+    return ReportItem.error(
+        report_codes.WATCHDOG_INVALID,
+        "Watchdog path '{watchdog}' is invalid.",
+        info={"watchdog": watchdog}
+    )
+
+
 def unable_to_get_sbd_status(node, reason):
     """
     there was (communication or parsing) failure during obtaining status of SBD
@@ -1654,40 +1761,39 @@ def cluster_restart_required_to_apply_changes():
     )
 
 
-def cib_alert_recipient_already_exists(alert_id, recipient_value):
+def cib_alert_recipient_already_exists(
+    alert_id, recipient_value, severity=ReportItemSeverity.ERROR, forceable=None
+):
     """
-    Error that recipient already exists.
+    Recipient with specified value already exists in alert with id 'alert_id'
 
     alert_id -- id of alert to which recipient belongs
     recipient_value -- value of recipient
     """
-    return ReportItem.error(
+    return ReportItem(
         report_codes.CIB_ALERT_RECIPIENT_ALREADY_EXISTS,
-        "Recipient '{recipient}' in alert '{alert}' already exists.",
+        severity,
+        "Recipient '{recipient}' in alert '{alert}' already exists",
         info={
             "recipient": recipient_value,
             "alert": alert_id
-        }
+        },
+        forceable=forceable
     )
 
 
-def cib_alert_recipient_not_found(alert_id, recipient_value):
+def cib_alert_recipient_invalid_value(recipient_value):
     """
-    Specified recipient not found.
+    Invalid recipient value.
 
-    alert_id -- id of alert to which recipient should belong
     recipient_value -- recipient value
     """
     return ReportItem.error(
-        report_codes.CIB_ALERT_RECIPIENT_NOT_FOUND,
-        "Recipient '{recipient}' not found in alert '{alert}'.",
-        info={
-            "recipient": recipient_value,
-            "alert": alert_id
-        }
+        report_codes.CIB_ALERT_RECIPIENT_VALUE_INVALID,
+        "Recipient value '{recipient}' is not valid.",
+        info={"recipient": recipient_value}
     )
 
-
 def cib_alert_not_found(alert_id):
     """
     Alert with specified id doesn't exist.
@@ -1743,3 +1849,114 @@ def unable_to_upgrade_cib_to_required_version(
             "current_version": "{0}.{1}.{2}".format(*current_version)
         }
     )
+
+def file_already_exists(
+        file_role, file_path, severity=ReportItemSeverity.ERROR,
+        forceable=None, node=None
+    ):
+    msg = "file {file_path} already exists"
+    if file_role:
+        msg = "{file_role} " + msg
+    if node:
+        msg = "{node}: " + msg
+    return ReportItem(
+        report_codes.FILE_ALREADY_EXISTS,
+        severity,
+        msg,
+        info={
+            "file_role": file_role,
+            "file_path": file_path,
+            "node": node,
+        },
+        forceable=forceable,
+    )
+
+def file_does_not_exist(file_role, file_path=""):
+    return ReportItem.error(
+        report_codes.FILE_DOES_NOT_EXIST,
+        "{file_role} file {file_path} does not exist",
+        info={
+            "file_role": file_role,
+            "file_path": file_path,
+        },
+    )
+
+def file_io_error(
+    file_role, file_path="", reason="", operation="work with",
+    severity=ReportItemSeverity.ERROR
+):
+    if file_path:
+        msg = "unable to {operation} {file_role} '{file_path}': {reason}"
+    else:
+        msg = "unable to {operation} {file_role}: {reason}"
+    return ReportItem(
+        report_codes.FILE_IO_ERROR,
+        severity,
+        msg,
+        info={
+            "file_role": file_role,
+            "file_path": file_path,
+            "reason": reason,
+            "operation": operation
+        },
+    )
+
+def unable_to_determine_user_uid(user):
+    return ReportItem.error(
+        report_codes.UNABLE_TO_DETERMINE_USER_UID,
+        "Unable to determine uid of user '{user}'",
+        info={
+            "user": user
+        }
+    )
+
+def unable_to_determine_group_gid(group):
+    return ReportItem.error(
+        report_codes.UNABLE_TO_DETERMINE_GROUP_GID,
+        "Unable to determine gid of group '{group}'",
+        info={
+            "group": group
+        }
+    )
+
+def unsupported_operation_on_non_systemd_systems():
+    return ReportItem.error(
+        report_codes.UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS,
+        "unsupported operation on non systemd systems"
+    )
+
+def live_environment_required(forbidden_options):
+    return ReportItem.error(
+        report_codes.LIVE_ENVIRONMENT_REQUIRED,
+        "This command does not support {options_string}",
+        info={
+            "forbidden_options": forbidden_options,
+            "options_string": ", ".join(forbidden_options),
+        }
+    )
+
+
+def quorum_cannot_disable_atb_due_to_sbd(
+    severity=ReportItemSeverity.ERROR, forceable=None
+):
+    """
+    Quorum option auto_tie_breaker cannot be disabled due to SBD.
+    """
+    return ReportItem(
+        report_codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD,
+        severity,
+        "unable to disable auto_tie_breaker: SBD fencing will have no effect",
+        forceable=forceable
+    )
+
+
+def sbd_requires_atb():
+    """
+    Warning that ATB will be enabled in order to make SBD fencing effective.
+    """
+    return ReportItem.warning(
+        report_codes.SBD_REQUIRES_ATB,
+        "auto_tie_breaker quorum option will be enabled to make SBD fencing "
+        "effective. Cluster has to be offline to be able to make this change."
+    )
+
diff --git a/pcs/lib/resource_agent.py b/pcs/lib/resource_agent.py
index ea93875..d49b5c0 100644
--- a/pcs/lib/resource_agent.py
+++ b/pcs/lib/resource_agent.py
@@ -125,14 +125,14 @@ def _get_pcmk_advanced_stonith_parameters(runner):
     """
     @simple_cache
     def __get_stonithd_parameters():
-        output, retval = runner.run(
-            [settings.stonithd_binary, "metadata"], ignore_stderr=True
+        stdout, stderr, dummy_retval = runner.run(
+            [settings.stonithd_binary, "metadata"]
         )
-        if output.strip() == "":
-            raise UnableToGetAgentMetadata("stonithd", output)
+        if stdout.strip() == "":
+            raise UnableToGetAgentMetadata("stonithd", stderr)
 
         try:
-            params = _get_agent_parameters(etree.fromstring(output))
+            params = _get_agent_parameters(etree.fromstring(stdout))
             for param in params:
                 param["longdesc"] = "{0}\n{1}".format(
                     param["shortdesc"], param["longdesc"]
@@ -166,15 +166,15 @@ def get_fence_agent_metadata(runner, fence_agent):
     ):
         raise AgentNotFound(fence_agent)
 
-    output, retval = runner.run(
-        [script_path, "-o", "metadata"], ignore_stderr=True
+    stdout, stderr, dummy_retval = runner.run(
+        [script_path, "-o", "metadata"]
     )
 
-    if output.strip() == "":
-        raise UnableToGetAgentMetadata(fence_agent, output)
+    if stdout.strip() == "":
+        raise UnableToGetAgentMetadata(fence_agent, stderr)
 
     try:
-        return etree.fromstring(output)
+        return etree.fromstring(stdout)
     except etree.XMLSyntaxError as e:
         raise UnableToGetAgentMetadata(fence_agent, str(e))
 
@@ -219,17 +219,16 @@ def _get_ocf_resource_agent_metadata(runner, provider, agent):
     if not __is_path_abs(script_path) or not is_path_runnable(script_path):
         raise AgentNotFound(agent_name)
 
-    output, retval = runner.run(
+    stdout, stderr, dummy_retval = runner.run(
         [script_path, "meta-data"],
-        env_extend={"OCF_ROOT": settings.ocf_root},
-        ignore_stderr=True
+        env_extend={"OCF_ROOT": settings.ocf_root}
     )
 
-    if output.strip() == "":
-        raise UnableToGetAgentMetadata(agent_name, output)
+    if stdout.strip() == "":
+        raise UnableToGetAgentMetadata(agent_name, stderr)
 
     try:
-        return etree.fromstring(output)
+        return etree.fromstring(stdout)
     except etree.XMLSyntaxError as e:
         raise UnableToGetAgentMetadata(agent_name, str(e))
 
diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py
index 4488a73..9b57400 100644
--- a/pcs/lib/sbd.py
+++ b/pcs/lib/sbd.py
@@ -46,6 +46,83 @@ def _run_parallel_and_raise_lib_error_on_failure(func, param_list):
         raise LibraryError(*report_list)
 
 
+def _even_number_of_nodes_and_no_qdevice(
+    corosync_conf_facade, node_number_modifier=0
+):
+    """
+    Returns True whenever cluster has no quorum device configured and number of
+    nodes + node_number_modifier is an even number, False otherwise.
+
+    corosync_conf_facade --
+    node_number_modifier -- this value will be added to current number of nodes.
+        This can be useful to test whether ATB is needed when adding/removing
+        node.
+    """
+    return (
+        not corosync_conf_facade.has_quorum_device()
+        and
+        (len(corosync_conf_facade.get_nodes()) + node_number_modifier) % 2 == 0
+    )
+
+
+def is_auto_tie_breaker_needed(
+    runner, corosync_conf_facade, node_number_modifier=0
+):
+    """
+    Returns True whenever quorum option auto tie breaker needs to be enabled
+    for proper working of SBD fencing. False if it is not needed.
+
+    runner -- command runner
+    corosync_conf_facade --
+    node_number_modifier -- this value will be added to current number of nodes.
+        This can be useful to test whether ATB is needed when adding/removing
+        node.
+    """
+    return (
+        _even_number_of_nodes_and_no_qdevice(
+            corosync_conf_facade, node_number_modifier
+        )
+        and
+        is_sbd_installed(runner)
+        and
+        is_sbd_enabled(runner)
+    )
+
+
+def atb_has_to_be_enabled_pre_enable_check(corosync_conf_facade):
+    """
+    Returns True whenever quorum option auto_tie_breaker needs to be enabled
+    for proper working of SBD fencing. False if it is not needed. This function
+    doesn't check if sbd is installed nor enabled.
+    """
+    return (
+        not corosync_conf_facade.is_enabled_auto_tie_breaker()
+        and
+        _even_number_of_nodes_and_no_qdevice(corosync_conf_facade)
+    )
+
+
+def atb_has_to_be_enabled(runner, corosync_conf_facade, node_number_modifier=0):
+    """
+    Return True whenever quorum option auto tie breaker has to be enabled for
+    proper working of SBD fencing. False if it's not needed or it is already
+    enabled.
+
+    runner -- command runner
+    corosync_conf_facade --
+    node_number_modifier -- this value will be added to current number of nodes.
+        This can be useful to test whether ATB is needed when adding/removing
+        node.
+    """
+    return (
+        not corosync_conf_facade.is_enabled_auto_tie_breaker()
+        and
+        is_auto_tie_breaker_needed(
+            runner, corosync_conf_facade, node_number_modifier
+        )
+    )
+
+
 def check_sbd(communicator, node, watchdog):
     """
     Check SBD on specified 'node' and existence of specified watchdog.
@@ -123,18 +200,23 @@ def set_sbd_config(communicator, node, config):
     )
 
 
-def set_sbd_config_on_node(report_processor, node_communicator, node, config):
+def set_sbd_config_on_node(
+    report_processor, node_communicator, node, config, watchdog
+):
     """
-    Send SBD configuration to 'node'. Also puts correct node name into
-        SBD_OPTS option (SBD_OPTS="-n <node_name>").
+    Send SBD configuration to 'node' with specified watchdog set. Also puts
+    correct node name into SBD_OPTS option (SBD_OPTS="-n <node_name>").
 
     report_processor --
     node_communicator -- NodeCommunicator
     node -- NodeAddresses
     config -- dictionary in format: <SBD config option>: <value>
+    watchdog -- path to watchdog device
     """
     config = dict(config)
     config["SBD_OPTS"] = '"-n {node_name}"'.format(node_name=node.label)
+    if watchdog:
+        config["SBD_WATCHDOG_DEV"] = watchdog
     set_sbd_config(node_communicator, node, dict_to_environment_file(config))
     report_processor.process(
         reports.sbd_config_accepted_by_node(node.label)
@@ -142,7 +224,7 @@ def set_sbd_config_on_node(report_processor, node_communicator, node, config):
 
 
 def set_sbd_config_on_all_nodes(
-        report_processor, node_communicator, node_list, config
+    report_processor, node_communicator, node_list, config, watchdog_dict
 ):
     """
     Send SBD configuration 'config' to all nodes in 'node_list'. Option
@@ -153,12 +235,20 @@ def set_sbd_config_on_all_nodes(
     node_communicator -- NodeCommunicator
     node_list -- NodeAddressesList
     config -- dictionary in format: <SBD config option>: <value>
+    watchdog_dict -- dictionary of watchdogs where key is NodeAdresses object
+        and value is path to watchdog
     """
     report_processor.process(reports.sbd_config_distribution_started())
     _run_parallel_and_raise_lib_error_on_failure(
         set_sbd_config_on_node,
         [
-            ([report_processor, node_communicator, node, config], {})
+            (
+                [
+                    report_processor, node_communicator, node, config,
+                    watchdog_dict.get(node)
+                ],
+                {}
+            )
             for node in node_list
         ]
     )
@@ -362,3 +452,14 @@ def is_sbd_enabled(runner):
     runner -- CommandRunner
     """
     return external.is_service_enabled(runner, "sbd")
+
+
+def is_sbd_installed(runner):
+    """
+    Check if SBD service is installed in local system.
+    Returns True if SBD service is installed. False otherwise.
+
+    runner -- CommandRunner
+    """
+    return external.is_service_installed(runner, "sbd")
+
diff --git a/pcs/lib/test/misc.py b/pcs/lib/test/misc.py
new file mode 100644
index 0000000..be99bb2
--- /dev/null
+++ b/pcs/lib/test/misc.py
@@ -0,0 +1,20 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import logging
+
+from pcs.lib.env import LibraryEnvironment as Env
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_unittest import mock
+
+
+def get_mocked_env(**kwargs):
+    return Env(
+        logger=mock.MagicMock(logging.Logger),
+        report_processor=MockLibraryReportProcessor(),
+        **kwargs
+    )
diff --git a/pcs/lib/test/test_env_file.py b/pcs/lib/test/test_env_file.py
new file mode 100644
index 0000000..754b40e
--- /dev/null
+++ b/pcs/lib/test/test_env_file.py
@@ -0,0 +1,187 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.common import report_codes
+from pcs.lib.env_file import RealFile, GhostFile
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import(
+    assert_raise_library_error,
+    assert_report_item_list_equal
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_unittest import mock
+
+
+class GhostFileReadTest(TestCase):
+    def test_raises_when_trying_read_nonexistent_file(self):
+        assert_raise_library_error(
+            lambda: GhostFile("some role", content=None).read(),
+            (
+                severities.ERROR,
+                report_codes.FILE_DOES_NOT_EXIST,
+                {
+                    "file_role": "some role",
+                }
+            ),
+        )
+
+ at mock.patch("pcs.lib.env_file.os.path.exists", return_value=True)
+class RealFileAssertNoConflictWithExistingTest(TestCase):
+    def check(self, report_processor, can_overwrite_existing=False):
+        real_file = RealFile("some role", "/etc/booth/some-name.conf")
+        real_file.assert_no_conflict_with_existing(
+            report_processor,
+            can_overwrite_existing
+        )
+
+    def test_success_when_config_not_exists(self, mock_exists):
+        mock_exists.return_value = False
+        report_processor=MockLibraryReportProcessor()
+        self.check(report_processor)
+        assert_report_item_list_equal(report_processor.report_item_list, [])
+
+    def test_raises_when_config_exists_and_overwrite_not_allowed(self, mock_ex):
+        assert_raise_library_error(
+            lambda: self.check(MockLibraryReportProcessor()),
+            (
+                severities.ERROR,
+                report_codes.FILE_ALREADY_EXISTS,
+                {
+                    "file_path": "/etc/booth/some-name.conf"
+                },
+                report_codes.FORCE_FILE_OVERWRITE,
+            ),
+        )
+
+    def test_warn_when_config_exists_and_overwrite_allowed(self, mock_exists):
+        report_processor=MockLibraryReportProcessor()
+        self.check(report_processor, can_overwrite_existing=True)
+        assert_report_item_list_equal(report_processor.report_item_list, [(
+            severities.WARNING,
+            report_codes.FILE_ALREADY_EXISTS,
+            {
+                "file_path": "/etc/booth/some-name.conf"
+            },
+        )])
+
+class RealFileWriteTest(TestCase):
+    def test_success_write_content_to_path(self):
+        mock_open = mock.mock_open()
+        mock_file_operation = mock.Mock()
+        with mock.patch("pcs.lib.env_file.open", mock_open, create=True):
+            RealFile("some role", "/etc/booth/some-name.conf").write(
+                "config content",
+                file_operation=mock_file_operation
+            )
+            mock_open.assert_called_once_with("/etc/booth/some-name.conf", "w")
+            mock_open().write.assert_called_once_with("config content")
+            mock_file_operation.assert_called_once_with(
+                "/etc/booth/some-name.conf"
+            )
+
+    def test_success_binary(self):
+        mock_open = mock.mock_open()
+        mock_file_operation = mock.Mock()
+        with mock.patch("pcs.lib.env_file.open", mock_open, create=True):
+            RealFile("some role", "/etc/booth/some-name.conf").write(
+                "config content".encode("utf-8"),
+                file_operation=mock_file_operation,
+                is_binary=True
+            )
+            mock_open.assert_called_once_with("/etc/booth/some-name.conf", "wb")
+            mock_open().write.assert_called_once_with(
+                "config content".encode("utf-8")
+            )
+            mock_file_operation.assert_called_once_with(
+                "/etc/booth/some-name.conf"
+            )
+
+    def test_raises_when_could_not_write(self):
+        assert_raise_library_error(
+            lambda:
+            RealFile("some role", "/no/existing/file.path").write(["content"]),
+            (
+                severities.ERROR,
+                report_codes.FILE_IO_ERROR,
+                {
+                    "reason":
+                        "No such file or directory: '/no/existing/file.path'"
+                    ,
+                }
+            )
+        )
+
+class RealFileReadTest(TestCase):
+    def test_success_read_content_from_file(self):
+        mock_open = mock.mock_open()
+        with mock.patch("pcs.lib.env_file.open", mock_open, create=True):
+            mock_open().read.return_value = "test booth\nconfig"
+            self.assertEqual(
+                "test booth\nconfig",
+                RealFile("some role", "/path/to.file").read()
+            )
+
+    def test_raises_when_could_not_read(self):
+        assert_raise_library_error(
+            lambda: RealFile("some role", "/no/existing/file.path").read(),
+            (
+                severities.ERROR,
+                report_codes.FILE_IO_ERROR,
+                {
+                    "reason":
+                        "No such file or directory: '/no/existing/file.path'"
+                    ,
+                }
+            )
+        )
+
+class RealFileRemoveTest(TestCase):
+    @mock.patch("pcs.lib.env_file.os.remove")
+    @mock.patch("pcs.lib.env_file.os.path.exists", return_value=True)
+    def test_success_remove_file(self, _, mock_remove):
+        RealFile("some role", "/path/to.file").remove()
+        mock_remove.assert_called_once_with("/path/to.file")
+
+    @mock.patch(
+        "pcs.lib.env_file.os.remove",
+        side_effect=EnvironmentError(1, "mock remove failed", "/path/to.file")
+    )
+    @mock.patch("pcs.lib.env_file.os.path.exists", return_value=True)
+    def test_raise_library_error_when_remove_failed(self, _, dummy):
+        assert_raise_library_error(
+            lambda: RealFile("some role", "/path/to.file").remove(),
+            (
+                severities.ERROR,
+                report_codes.FILE_IO_ERROR,
+                {
+                    'reason': "mock remove failed: '/path/to.file'",
+                    'file_role': 'some role',
+                    'file_path': '/path/to.file'
+                }
+            )
+        )
+
+    @mock.patch("pcs.lib.env_file.os.path.exists", return_value=False)
+    def test_existence_is_required(self, _):
+        assert_raise_library_error(
+            lambda: RealFile("some role", "/path/to.file").remove(),
+            (
+                severities.ERROR,
+                report_codes.FILE_IO_ERROR,
+                {
+                    'reason': "File does not exist",
+                    'file_role': 'some role',
+                    'file_path': '/path/to.file'
+                }
+            )
+        )
+
+    @mock.patch("pcs.lib.env_file.os.path.exists", return_value=False)
+    def test_noexistent_can_be_silenced(self, _):
+        RealFile("some role", "/path/to.file").remove(silence_no_existence=True)
diff --git a/pcs/lib/test/test_errors.py b/pcs/lib/test/test_errors.py
new file mode 100644
index 0000000..871aa76
--- /dev/null
+++ b/pcs/lib/test/test_errors.py
@@ -0,0 +1,20 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+
+from pcs.lib.errors import LibraryEnvError
+
+
+class LibraryEnvErrorTest(TestCase):
+    def test_can_sign_solved_reports(self):
+        e = LibraryEnvError("first", "second", "third")
+        for report in e.args:
+            if report == "second":
+                e.sign_processed(report)
+
+        self.assertEqual(["first", "third"], e.unprocessed)
diff --git a/pcs/lib/test/test_pacemaker_values.py b/pcs/lib/test/test_pacemaker_values.py
index 7979990..62b8e91 100644
--- a/pcs/lib/test/test_pacemaker_values.py
+++ b/pcs/lib/test/test_pacemaker_values.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from pcs.test.tools.assertions import assert_raise_library_error
 
diff --git a/pcs/node.py b/pcs/node.py
index ac154d4..729ea35 100644
--- a/pcs/node.py
+++ b/pcs/node.py
@@ -12,6 +12,8 @@ from pcs import (
     usage,
     utils,
 )
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.cli.common.parse_args import prepare_options
 from pcs.lib.errors import LibraryError
 import pcs.lib.pacemaker as lib_pacemaker
 from pcs.lib.pacemaker_values import get_valid_timeout_seconds
@@ -33,13 +35,31 @@ def node_cmd(argv):
         node_standby(argv)
     elif sub_cmd == "unstandby":
         node_standby(argv, False)
+    elif sub_cmd == "attribute":
+        if "--name" in utils.pcs_options and len(argv) > 1:
+            usage.node("attribute")
+            sys.exit(1)
+        filter_attr=utils.pcs_options.get("--name", None)
+        if len(argv) == 0:
+            attribute_show_cmd(filter_attr=filter_attr)
+        elif len(argv) == 1:
+            attribute_show_cmd(argv.pop(0), filter_attr=filter_attr)
+        else:
+            attribute_set_cmd(argv.pop(0), argv)
     elif sub_cmd == "utilization":
+        if "--name" in utils.pcs_options and len(argv) > 1:
+            usage.node("utilization")
+            sys.exit(1)
+        filter_name=utils.pcs_options.get("--name", None)
         if len(argv) == 0:
-            print_nodes_utilization()
+            print_node_utilization(filter_name=filter_name)
         elif len(argv) == 1:
-            print_node_utilization(argv.pop(0))
+            print_node_utilization(argv.pop(0), filter_name=filter_name)
         else:
-            set_node_utilization(argv.pop(0), argv)
+            try:
+                set_node_utilization(argv.pop(0), argv)
+            except CmdLineInputError as e:
+                utils.exit_on_cmdline_input_errror(e, "node", "utilization")
     # pcs-to-pcsd use only
     elif sub_cmd == "pacemaker-status":
         node_pacemaker_status()
@@ -60,8 +80,8 @@ def node_maintenance(argv, on=True):
         for node in argv:
             if node not in cluster_nodes:
                 utils.err(
-                    "Node '%s' does not appear to exist in configuration" %
-                    argv[0],
+                    "Node '{0}' does not appear to exist in "
+                    "configuration".format(node),
                     False
                 )
                 failed_count += 1
@@ -70,25 +90,30 @@ def node_maintenance(argv, on=True):
     else:
         nodes.append("")
 
+    if failed_count > 0:
+        sys.exit(1)
+
     for node in nodes:
-        node = ["-N", node] if node else []
+        node_attr = ["-N", node] if node else []
         output, retval = utils.run(
             ["crm_attribute", "-t", "nodes", "-n", "maintenance"] + action +
-            node
+            node_attr
         )
         if retval != 0:
-            node_name = ("node '%s'" % node) if argv else "current node"
+            node_name = ("node '{0}'".format(node)) if argv else "current node"
             failed_count += 1
             if on:
                 utils.err(
-                    "Unable to put %s to maintenance mode.\n%s" %
-                    (node_name, output),
+                    "Unable to put {0} to maintenance mode: {1}".format(
+                        node_name, output
+                    ),
                     False
                 )
             else:
                 utils.err(
-                    "Unable to remove %s from maintenance mode.\n%s" %
-                    (node_name, output),
+                    "Unable to remove {0} from maintenance mode: {1}".format(
+                        node_name, output
+                    ),
                     False
                 )
     if failed_count > 0:
@@ -128,30 +153,62 @@ def set_node_utilization(node, argv):
     cib = utils.get_cib_dom()
     node_el = utils.dom_get_node(cib, node)
     if node_el is None:
-        utils.err("Unable to find a node: {0}".format(node))
+        if utils.usefile:
+            utils.err("Unable to find a node: {0}".format(node))
 
-    utils.dom_update_utilization(
-        node_el, utils.convert_args_to_tuples(argv), "nodes-"
-    )
+        for attrs in utils.getNodeAttributesFromPacemaker():
+            if attrs.name == node and attrs.type == "remote":
+                node_attrs = attrs
+                break
+        else:
+            utils.err("Unable to find a node: {0}".format(node))
+
+        nodes_section_list = cib.getElementsByTagName("nodes")
+        if len(nodes_section_list) == 0:
+            utils.err("Unable to get nodes section of cib")
+
+        dom = nodes_section_list[0].ownerDocument
+        node_el = dom.createElement("node")
+        node_el.setAttribute("id", node_attrs.id)
+        node_el.setAttribute("type", node_attrs.type)
+        node_el.setAttribute("uname", node_attrs.name)
+        nodes_section_list[0].appendChild(node_el)
+
+    utils.dom_update_utilization(node_el, prepare_options(argv), "nodes-")
     utils.replace_cib_configuration(cib)
 
-def print_node_utilization(node):
+def print_node_utilization(filter_node=None, filter_name=None):
     cib = utils.get_cib_dom()
-    node_el = utils.dom_get_node(cib, node)
-    if node_el is None:
-        utils.err("Unable to find a node: {0}".format(node))
-    utilization = utils.get_utilization_str(node_el)
 
-    print("Node Utilization:")
-    print(" {0}: {1}".format(node, utilization))
+    node_element_list = cib.getElementsByTagName("node")
+
+
+    if(
+        filter_node
+        and
+        filter_node not in [
+            node_element.getAttribute("uname")
+            for node_element in node_element_list
+        ]
+        and (
+            utils.usefile
+            or
+            filter_node not in [
+                node_attrs.name for node_attrs
+                in utils.getNodeAttributesFromPacemaker()
+            ]
+        )
+    ):
+        utils.err("Unable to find a node: {0}".format(filter_node))
 
-def print_nodes_utilization():
-    cib = utils.get_cib_dom()
     utilization = {}
-    for node_el in cib.getElementsByTagName("node"):
-        u = utils.get_utilization_str(node_el)
+    for node_el in node_element_list:
+        node = node_el.getAttribute("uname")
+        if filter_node is not None and node != filter_node:
+            continue
+        u = utils.get_utilization_str(node_el, filter_name)
         if u:
-            utilization[node_el.getAttribute("uname")] = u
+            utilization[node] = u
     print("Node Utilization:")
     for node in sorted(utilization):
         print(" {0}: {1}".format(node, utilization[node]))
@@ -163,3 +220,27 @@ def node_pacemaker_status():
         ))
     except LibraryError as e:
         utils.process_library_reports(e.args)
+
+def attribute_show_cmd(filter_node=None, filter_attr=None):
+    node_attributes = utils.get_node_attributes(
+        filter_node=filter_node,
+        filter_attr=filter_attr
+    )
+    print("Node Attributes:")
+    attribute_print(node_attributes)
+
+def attribute_set_cmd(node, argv):
+    try:
+        attrs = prepare_options(argv)
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(e, "node", "attribute")
+    for name, value in attrs.items():
+        utils.set_node_attribute(name, value, node)
+
+def attribute_print(node_attributes):
+    for node in sorted(node_attributes.keys()):
+        line_parts = [" " + node + ":"]
+        for name, value in sorted(node_attributes[node].items()):
+            line_parts.append("{0}={1}".format(name, value))
+        print(" ".join(line_parts))
+
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 4426444..88c4151 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "July 2016" "pcs 0.9.153" "System Administration Utilities"
+.TH PCS "8" "September 2016" "pcs 0.9.154" "System Administration Utilities"
 .SH NAME
 pcs \- pacemaker/corosync configuration system
 .SH SYNOPSIS
@@ -45,6 +45,9 @@ Manage quorum device provider on the local host.
 quorum
 Manage cluster quorum settings.
 .TP
+booth
+Manage booth (cluster ticket manager).
+.TP
 status
 View cluster status.
 .TP
@@ -61,8 +64,8 @@ alert
 Manage pacemaker alerts.
 .SS "resource"
 .TP
-[show [resource id]] [\fB\-\-full\fR] [\fB\-\-groups\fR]
-Show all currently configured resources or if a resource is specified show the options for the configured resource.  If \fB\-\-full\fR is specified all configured resource options will be displayed.  If \fB\-\-groups\fR is specified, only show groups (and their resources).
+[show [<resource id>] | \fB\-\-full\fR | \fB\-\-groups\fR | \fB\-\-hide\-inactive\fR]
+Show all currently configured resources or if a resource is specified show the options for the configured resource.  If \fB\-\-full\fR is specified, all configured resource options will be displayed.  If \fB\-\-groups\fR is specified, only show groups (and their resources).  If \fB\-\-hide\-inactive\fR is specified, only show active resources.
 .TP
 list [<standard|provider|type>] [\fB\-\-nodesc\fR]
 Show list of all available resources, optionally filtered by specified type, standard or provider. If \fB\-\-nodesc\fR is used then descriptions of resources are not printed.
@@ -268,8 +271,8 @@ Upgrade the CIB to conform to the latest version of the document schema.
 edit [scope=<scope> | \fB\-\-config\fR]
 Edit the cib in the editor specified by the $EDITOR environment variable and push out any changes upon saving.  Specify scope to edit a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to edit the whole CIB or be warned in the case of outdated CIB.
 .TP
-node add <node[,node\-altaddr]> [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-enable\fR]
-Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node.  If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-wait\fR is sepcified wait up to 'n' seconds for the new node to start.  If \fB\-\-enable\fR is specified enable corosync/pacemaker on new node.  When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 address.
+node add <node[,node\-altaddr]> [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-enable\fR] [\fB\-\-watchdog\fR=<watchdog\-path>]
+Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node.  If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-wait\fR is sepcified wait up to 'n' seconds for the new node to start.  If \fB\-\-enable\fR is specified enable corosync/pacemaker on new node.  When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 addre [...]
 .TP
 node remove <node>
 Shutdown specified node and remove it from pacemaker and corosync on all other nodes in the cluster.
@@ -481,12 +484,15 @@ Remove colocation constraints with specified resources.
 ticket [show] [\fB\-\-full\fR]
 List all current ticket constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
 .TP
-ticket add <ticket> [<role>] <resource id> [options] [id=constraint\-id]
+ticket add <ticket> [<role>] <resource id> [<options>] [id=<constraint\-id>]
 Create a ticket constraint for <resource id>. Available option is loss-policy=fence/stop/freeze/demote. A role can be master, slave, started or stopped.
 .TP
-ticket set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
+ticket set <resource1> [<resourceN>]... [<options>] [set <resourceX> ... [<options>]] setoptions <constraint_options>
 Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket=<ticket>. Optional constraint options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
 .TP
+ticket remove <ticket> <resource id>
+Remove all ticket constraints with <ticket> from <resource id>.
+.TP
 remove [constraint id]...
 Remove constraint(s) or constraint rules with the specified id(s).
 .TP
@@ -543,7 +549,7 @@ disable <device model>
 Configure specified model of quorum device provider to not start on boot.
 .SS "quorum"
 .TP
-config
+[config]
 Show quorum configuration.
 .TP
 status
@@ -573,13 +579,66 @@ Cancel waiting for all nodes when establishing quorum.  Useful in situations whe
 .TP
 update [auto_tie_breaker=[0|1]] [last_man_standing=[0|1]] [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]]
 Add/Change quorum options.  At least one option must be specified.  Options are documented in corosync's votequorum(5) man page.  Requires the cluster to be stopped.
+.SS "booth"
+.TP
+setup sites <address> <address> [<address>...] [arbitrators <address> ...] [\fB\-\-force\fR]
+Write new booth configuration with specified sites and arbitrators.  Total number of peers (sites and arbitrators) must be odd.  When the configuration file already exists, command fails unless \fB\-\-force\fR is specified.
+.TP
+destroy
+Remove booth configuration files.
+.TP
+ticket add <ticket> [<name>=<value> ...]
+Add new ticket to the current configuration. Ticket options are specified in the booth man page.
+
+.TP
+ticket remove <ticket>
+Remove the specified ticket from the current configuration.
+.TP
+config [<node>]
+Show booth configuration from the specified node or from the current node if node not specified.
+.TP
+create ip <address>
+Make the cluster run booth service on the specified ip address as a cluster resource.  Typically this is used to run booth site.
+.TP
+remove
+Remove booth resources created by the "pcs booth create" command.
+.TP
+restart
+Restart booth resources created by the "pcs booth create" command.
+.TP
+ticket grant <ticket> [<site address>]
+Grant the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted.  Specifying site address is mandatory when running this command on an arbitrator.
+.TP
+ticket revoke <ticket> [<site address>]
+Revoke the ticket for the site specified by address.  Site address which has been specified with 'pcs booth create' command is used if 'site address' is omitted.  Specifying site address is mandatory when running this command on an arbitrator.
+.TP
+status
+Print current status of booth on the local node.
+.TP
+pull <node>
+Pull booth configuration from the specified node.
+.TP
+sync [\fB\-\-skip\-offline\fR]
+Send booth configuration from the local node to all nodes in the cluster.
+.TP
+enable
+Enable booth arbitrator service.
+.TP
+disable
+Disable booth arbitrator service.
+.TP
+start
+Start booth arbitrator service.
+.TP
+stop
+Stop booth arbitrator service.
 .SS "status"
 .TP
-[status] [\fB\-\-full\fR | \fB\-\-hide-inactive\fR]
-View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide-inactive\fR hides inactive resources).
+[status] [\fB\-\-full\fR | \fB\-\-hide\-inactive\fR]
+View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide\-inactive\fR hides inactive resources).
 .TP
-resources
-View current status of cluster resources.
+resources [<resource id> | \fB\-\-full\fR | \fB\-\-groups\fR | \fB\-\-hide\-inactive\fR]
+Show all currently configured resources or if a resource is specified show the options for the configured resource.  If \fB\-\-full\fR is specified, all configured resource options will be displayed.  If \fB\-\-groups\fR is specified, only show groups (and their resources).  If \fB\-\-hide\-inactive\fR is specified, only show active resources.
 .TP
 groups
 View currently configured groups and their resources.
@@ -590,6 +649,12 @@ View current cluster status.
 corosync
 View current membership information as seen by corosync.
 .TP
+quorum
+View current quorum status.
+.TP
+qdevice <device model> [\fB\-\-full\fR] [<cluster name>]
+Show runtime status of specified model of quorum device provider.  Using \fB\-\-full\fR will give more detailed output.  If <cluster name> is specified, only information about the specified cluster will be displayed.
+.TP
 nodes [corosync|both|config]
 View current status of nodes from pacemaker. If 'corosync' is specified, print nodes currently configured in corosync, if 'both' is specified, print nodes from both corosync & pacemaker.  If 'config' is specified, print nodes from corosync & pacemaker configuration.
 .TP
@@ -618,14 +683,14 @@ Show specified configuration checkpoint.
 checkpoint restore <checkpoint_number>
 Restore cluster configuration to specified checkpoint.
 .TP
-import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf]
-Converts CMAN cluster configuration to Pacemaker cluster configuration.  Converted configuration will be saved to 'output' file.  To send the configuration to the cluster nodes the 'pcs config restore' command can be used.  If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually.  If no input is specified /etc/cluster/cluster.conf will be used.  You can force to create output containing either cluster.conf or corosync.conf using the output-format option.
+import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf] [dist=<dist>]
+Converts CMAN cluster configuration to Pacemaker cluster configuration.  Converted configuration will be saved to 'output' file.  To send the configuration to the cluster nodes the 'pcs config restore' command can be used.  If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually.  If no input is specified /etc/cluster/cluster.conf will be used.  You can force to create output containing either cluster.conf or corosync.conf using the output-format op [...]
 .TP
-import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose
+import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose [dist=<dist>]
 Converts CMAN cluster configuration to a list of pcs commands which recreates the same cluster as Pacemaker cluster when executed.  Commands will be saved to 'output' file.  For other options see above.
 .TP
-export pcs\-commands|pcs\-commands\-verbose output=<filename>
-Creates a list of pcs commands which upon execution recreates the current cluster running on this node.  Commands will be saved to 'output' file.  Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages.
+export pcs\-commands|pcs\-commands\-verbose [output=<filename>] [dist=<dist>]
+Creates a list of pcs commands which upon execution recreates the current cluster running on this node.  Commands will be saved to 'output' file or written to stdout if 'output' is not specified.  Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages.  Optionally specify output version by setting 'dist' option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You can get the list of supported dist [...]
 .SS "pcsd"
 .TP
 certkey <certificate file> <key file>
@@ -638,6 +703,9 @@ clear-auth [\fB\-\-local\fR] [\fB\-\-remote\fR]
 Removes all system tokens which allow pcs/pcsd on the current system to authenticate with remote pcs/pcsd instances and vice\-versa.  After this command is run this node will need to be re\-authenticated with other nodes (using 'pcs cluster auth').  Using \fB\-\-local\fR only removes tokens used by local pcs (and pcsd if root) to connect to other pcsd instances, using \fB\-\-remote\fR clears authentication tokens used by remote systems to connect to the local pcsd instance.
 .SS "node"
 .TP
+attribute [[<node>] [\fB\-\-name\fR <name>] | <node> <name>=<value> ...]
+Manage node attributes.  If no parameters are specified, show attributes of all nodes.  If one parameter is specified, show attributes of specified node.  If \fB\-\-name\fR is specified, show specified attribute's value from all nodes.  If more parameters are specified, set attributes of specified node.  Attributes can be removed by setting an attribute without a value.
+.TP
 maintenance [\fB\-\-all\fR] | [<node>]...
 Put specified node(s) into maintenance mode, if no node or options are specified the current node will be put into maintenance mode, if \fB\-\-all\fR is specified all nodes will be put into maintenace mode.
 .TP
@@ -650,29 +718,29 @@ Put specified node into standby mode (the node specified will no longer be able
 unstandby [\fB\-\-all\fR | <node>] [\fB\-\-wait\fR[=n]]
 Remove node from standby mode (the node specified will now be able to host resources), if no node or options are specified the current node will be removed from standby mode, if \fB\-\-all\fR is specified all nodes will be removed from standby mode.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the node(s) to be removed from standby mode and then return 0 on success or 1 if the operation not succeeded yet.  If 'n' is not specified it defaults to 60 minutes.
 .TP
-utilization [<node> [<name>=<value> ...]]
-Add specified utilization options to specified node. If node is not specified, shows utilization of all nodes. If utilization options are not specified, shows utilization of specified node. Utilization option should be in format name=value, value has to be integer. Options may be removed by setting an option without a value. Example: pcs node utilization node1 cpu=4 ram=
+utilization [[<node>] [\fB\-\-name\fR <name>] | <node> <name>=<value> ...]
+Add specified utilization options to specified node.  If node is not specified, shows utilization of all nodes.  If \fB\-\-name\fR is specified, shows specified utilization value from all nodes. If utilization options are not specified, shows utilization of specified node.  Utilization option should be in format name=value, value has to be integer.  Options may be removed by setting an option without a value.  Example: pcs node utilization node1 cpu=4 ram=
 .SS "alert"
 .TP
 [config|show]
 Show all configured alerts.
 .TP
 create path=<path> [id=<alert\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
-Create new alert with specified path. Id will be automatically generated if it is not specified.
+Define an alert handler with specified path. Id will be automatically generated if it is not specified.
 .TP
 update <alert\-id> [path=<path>] [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
-Update existing alert with specified id.
+Update existing alert handler with specified id.
 .TP
 remove <alert\-id>
-Remove alert with specified id.
+Remove alert handler with specified id.
 .TP
-recipient add <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
-Add new recipient to specified alert.
+recipient add <alert\-id> value=<recipient\-value> [id=<recipient\-id>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
+Add new recipient to specified alert handler.
 .TP
-recipient update <alert\-id> <recipient\-value> [description=<description>] [options [<option>=<value>]...] [meta [<meta\-option>=<value>]...]
-Update existing recipient identified by alert and it's value.
+recipient update <recipient\-id> [value=<recipient\-value>] [description=<description>] [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
+Update existing recipient identified by its id.
 .TP
-recipient remove <alert\-id> <recipient\-value>
+recipient remove <recipient\-id>
 Remove specified recipient.
 .SH EXAMPLES
 .TP
diff --git a/pcs/prop.py b/pcs/prop.py
index 3a65990..e84e42a 100644
--- a/pcs/prop.py
+++ b/pcs/prop.py
@@ -7,10 +7,12 @@ from __future__ import (
 
 import sys
 import json
-from xml.dom.minidom import parseString
 
-from pcs import usage
-from pcs import utils
+from pcs import (
+    node,
+    usage,
+    utils,
+)
 
 def property_cmd(argv):
     if len(argv) == 0:
@@ -101,9 +103,7 @@ def unset_property(argv):
         utils.replace_cib_configuration(cib_dom)
 
 def list_property(argv):
-    print_all = False
-    if len(argv) == 0:
-        print_all = True
+    print_all = len(argv) == 0
 
     if "--all" in utils.pcs_options and "--defaults" in utils.pcs_options:
         utils.err("you cannot specify both --all and --defaults")
@@ -116,23 +116,21 @@ def list_property(argv):
         properties = {}
 
     if "--defaults" not in utils.pcs_options:
-        properties = get_set_properties(
+        properties = utils.get_set_properties(
             None if print_all else argv[0],
             properties
         )
 
     print("Cluster Properties:")
     for prop,val in sorted(properties.items()):
-        print(" " + prop + ": " + val)
+        print(" {0}: {1}".format(prop, val))
 
-    node_attributes = utils.get_node_attributes()
+    node_attributes = utils.get_node_attributes(
+        filter_attr=(None if print_all else argv[0])
+    )
     if node_attributes:
         print("Node Attributes:")
-        for node in sorted(node_attributes):
-            line_parts = [" " + node + ":"]
-            for attr in node_attributes[node]:
-                line_parts.append(attr)
-            print(" ".join(line_parts))
+        node.attribute_print(node_attributes)
 
 def get_default_properties():
     parameters = {}
@@ -141,16 +139,3 @@ def get_default_properties():
         parameters[name] = prop["default"]
     return parameters
 
-def get_set_properties(prop_name=None, defaults=None):
-    properties = {} if defaults is None else dict(defaults)
-    (output, retVal) = utils.run(["cibadmin","-Q","--scope", "crm_config"])
-    if retVal != 0:
-        utils.err("unable to get crm_config\n"+output)
-    dom = parseString(output)
-    de = dom.documentElement
-    crm_config_properties = de.getElementsByTagName("nvpair")
-    for prop in crm_config_properties:
-        if prop_name is None or (prop_name == prop.getAttribute("name")):
-            properties[prop.getAttribute("name")] = prop.getAttribute("value")
-    return properties
-
diff --git a/pcs/qdevice.py b/pcs/qdevice.py
index 0037704..2591bae 100644
--- a/pcs/qdevice.py
+++ b/pcs/qdevice.py
@@ -92,7 +92,7 @@ def qdevice_destroy_cmd(lib, argv, modifiers):
     if len(argv) != 1:
         raise CmdLineInputError()
     model = argv[0]
-    lib.qdevice.destroy(model)
+    lib.qdevice.destroy(model, modifiers["force"])
 
 def qdevice_start_cmd(lib, argv, modifiers):
     if len(argv) != 1:
@@ -104,7 +104,7 @@ def qdevice_stop_cmd(lib, argv, modifiers):
     if len(argv) != 1:
         raise CmdLineInputError()
     model = argv[0]
-    lib.qdevice.stop(model)
+    lib.qdevice.stop(model, modifiers["force"])
 
 def qdevice_kill_cmd(lib, argv, modifiers):
     if len(argv) != 1:
diff --git a/pcs/quorum.py b/pcs/quorum.py
index 2d54ed7..6cd06ca 100644
--- a/pcs/quorum.py
+++ b/pcs/quorum.py
@@ -8,10 +8,10 @@ from __future__ import (
 import sys
 
 from pcs import (
+    stonith,
     usage,
     utils,
 )
-from pcs.cluster import cluster_quorum_unblock
 from pcs.cli.common import parse_args
 from pcs.cli.common.console_report import indent
 from pcs.cli.common.errors import CmdLineInputError
@@ -19,10 +19,10 @@ from pcs.lib.errors import LibraryError
 
 def quorum_cmd(lib, argv, modificators):
     if len(argv) < 1:
-        usage.quorum()
-        sys.exit(1)
+        sub_cmd, argv_next = "config", []
+    else:
+        sub_cmd, argv_next = argv[0], argv[1:]
 
-    sub_cmd, argv_next = argv[0], argv[1:]
     try:
         if sub_cmd == "help":
             usage.quorum(argv)
@@ -35,7 +35,8 @@ def quorum_cmd(lib, argv, modificators):
         elif sub_cmd == "device":
             quorum_device_cmd(lib, argv_next, modificators)
         elif sub_cmd == "unblock":
-            cluster_quorum_unblock(argv_next)
+            # TODO switch to new architecture
+            quorum_unblock_cmd(argv_next)
         elif sub_cmd == "update":
             quorum_update_cmd(lib, argv_next, modificators)
         else:
@@ -120,7 +121,8 @@ def quorum_update_cmd(lib, argv, modificators):
 
     lib.quorum.set_options(
         options,
-        skip_offline_nodes=modificators["skip_offline_nodes"]
+        skip_offline_nodes=modificators["skip_offline_nodes"],
+        force=modificators["force"]
     )
 
 def quorum_device_add_cmd(lib, argv, modificators):
@@ -185,3 +187,58 @@ def quorum_device_update_cmd(lib, argv, modificators):
         force_options=modificators["force"],
         skip_offline_nodes=modificators["skip_offline_nodes"]
     )
+
+# TODO switch to new architecture, move to lib
+def quorum_unblock_cmd(argv):
+    if len(argv) > 0:
+        usage.quorum(["unblock"])
+        sys.exit(1)
+
+    if utils.is_rhel6():
+        utils.err("operation is not supported on CMAN clusters")
+
+    output, retval = utils.run(
+        ["corosync-cmapctl", "-g", "runtime.votequorum.wait_for_all_status"]
+    )
+    if retval != 0:
+        utils.err("unable to check quorum status")
+    if output.split("=")[-1].strip() != "1":
+        utils.err("cluster is not waiting for nodes to establish quorum")
+
+    unjoined_nodes = (
+        set(utils.getNodesFromCorosyncConf())
+        -
+        set(utils.getCorosyncActiveNodes())
+    )
+    if not unjoined_nodes:
+        utils.err("no unjoined nodes found")
+    if "--force" not in utils.pcs_options:
+        answer = utils.get_terminal_input(
+            (
+                "WARNING: If node(s) {nodes} are not powered off or they do"
+                + " have access to shared resources, data corruption and/or"
+                + " cluster failure may occur. Are you sure you want to"
+                + " continue? [y/N] "
+            ).format(nodes=", ".join(unjoined_nodes))
+        )
+        if answer.lower() not in ["y", "yes"]:
+            print("Canceled")
+            return
+    for node in unjoined_nodes:
+        stonith.stonith_confirm([node], skip_question=True)
+
+    output, retval = utils.run(
+        ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"]
+    )
+    if retval != 0:
+        utils.err("unable to cancel waiting for nodes")
+    print("Quorum unblocked")
+
+    startup_fencing = utils.get_set_properties().get("startup-fencing", "")
+    utils.set_cib_property(
+        "startup-fencing",
+        "false" if startup_fencing.lower() != "false" else "true"
+    )
+    utils.set_cib_property("startup-fencing", startup_fencing)
+    print("Waiting for nodes canceled")
+
diff --git a/pcs/resource.py b/pcs/resource.py
index 9384a21..a5bcf7c 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -60,7 +60,10 @@ def resource_cmd(argv):
             argv, with_clone=True
         )
         try:
-            resource_create(res_id, res_type, ra_values, op_values, meta_values, clone_opts)
+            resource_create(
+                res_id, res_type, ra_values, op_values, meta_values, clone_opts,
+                group=utils.pcs_options.get("--group", None)
+            )
         except CmdLineInputError as e:
             utils.exit_on_cmdline_input_errror(e, "resource", 'create')
     elif (sub_cmd == "move"):
@@ -188,7 +191,10 @@ def resource_cmd(argv):
         elif len(argv) == 1:
             print_resource_utilization(argv.pop(0))
         else:
-            set_resource_utilization(argv.pop(0), argv)
+            try:
+                set_resource_utilization(argv.pop(0), argv)
+            except CmdLineInputError as e:
+                utils.exit_on_cmdline_input_errror(e, "resource", "utilization")
     elif (sub_cmd == "get_resource_agent_info"):
         get_resource_agent_info(argv)
     else:
@@ -335,18 +341,18 @@ def resource_list_available(argv):
         print("\n".join(ret))
 
 
-def resource_print_options(agent_name, desc, params):
+def resource_print_options(agent_name, desc, params, actions):
     if desc["shortdesc"]:
         agent_name += " - " + format_desc(
             len(agent_name + " - "), desc["shortdesc"]
         )
     print(agent_name)
-    print()
     if desc["longdesc"]:
-        print(desc["longdesc"])
         print()
+        print(desc["longdesc"])
 
     if len(params) > 0:
+        print()
         print("Resource options:")
     for param in params:
         if param.get("advanced", False):
@@ -363,6 +369,19 @@ def resource_print_options(agent_name, desc, params):
         desc = format_desc(indent, desc)
         print("  " + name + ": " + desc)
 
+    if actions:
+        print()
+        print("Default operations:")
+        action_lines = []
+        for action in utils.filter_default_op_from_actions(actions):
+            parts = ["  {0}:".format(action.get("name", ""))]
+            parts.extend([
+                "{0}={1}".format(name, value)
+                for name, value in sorted(action.items())
+                if name != "name"
+            ])
+            action_lines.append(" ".join(parts))
+        print("\n".join(action_lines))
 
 def resource_list_options(resource):
     runner = utils.cmd_runner()
@@ -373,13 +392,14 @@ def resource_list_options(resource):
         )
         desc = lib_ra.get_agent_desc(metadata_dom)
         params = lib_ra.get_resource_agent_parameters(metadata_dom)
-        return desc, params
+        actions = lib_ra.get_agent_actions(metadata_dom)
+        return desc, params, actions
 
     found_resource = False
 
     try:
-        descriptions, parameters = get_desc_params(resource)
-        resource_print_options(resource, descriptions, parameters)
+        descriptions, parameters, actions = get_desc_params(resource)
+        resource_print_options(resource, descriptions, parameters, actions)
         return
     except lib_ra.UnsupportedResourceAgent:
         pass
@@ -390,7 +410,7 @@ def resource_list_options(resource):
     except LibraryError as e:
         utils.process_library_reports(e.args)
 
-    # no standard was give, lets search all ocf providers first
+    # no standard was given, let's search all ocf providers first
     providers = sorted(os.listdir(settings.ocf_resources))
     for provider in providers:
         if not os.path.exists(
@@ -399,18 +419,18 @@ def resource_list_options(resource):
             continue
         try:
             agent = "ocf:{0}:{1}".format(provider, resource)
-            descriptions, parameters = get_desc_params(agent)
-            resource_print_options(agent, descriptions, parameters)
+            descriptions, parameters, actions = get_desc_params(agent)
+            resource_print_options(agent, descriptions, parameters, actions)
             return
         except (LibraryError, lib_ra.ResourceAgentLibError):
             pass
 
-    # still not found, now lets look at nagios plugins
+    # still not found, now let's take a look at nagios plugins
     if not found_resource:
         try:
             agent = "nagios:" + resource
-            descriptions, parameters = get_desc_params(agent)
-            resource_print_options(agent, descriptions, parameters)
+            descriptions, parameters, actions = get_desc_params(agent)
+            resource_print_options(agent, descriptions, parameters, actions)
         except (LibraryError, lib_ra.ResourceAgentLibError):
             utils.err("Unable to find resource: {0}".format(resource))
 
@@ -437,7 +457,10 @@ def format_desc(indent, desc):
 
 # Create a resource using cibadmin
 # ra_class, ra_type & ra_provider must all contain valid info
-def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_opts=[]):
+def resource_create(
+    ra_id, ra_type, ra_values, op_values, meta_values=[], clone_opts=[],
+    group=None
+):
     if "--wait" in utils.pcs_options:
         wait_timeout = utils.validate_wait_get_timeout()
         if "--disabled" in utils.pcs_options:
@@ -588,7 +611,7 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
 
     if "--clone" in utils.pcs_options or len(clone_opts) > 0:
         dom, dummy_clone_id = resource_clone_create(dom, [ra_id] + clone_opts)
-        if "--group" in utils.pcs_options:
+        if group:
             print("Warning: --group ignored when creating a clone")
         if "--master" in utils.pcs_options:
             print("Warning: --master ignored when creating a clone")
@@ -596,11 +619,10 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
         dom, dummy_master_id = resource_master_create(
             dom, [ra_id] + master_meta_values
         )
-        if "--group" in utils.pcs_options:
+        if group:
             print("Warning: --group ignored when creating a master")
-    elif "--group" in utils.pcs_options:
-        groupname = utils.pcs_options["--group"]
-        dom = resource_group_add(dom, groupname, [ra_id])
+    elif group:
+        dom = resource_group_add(dom, group, [ra_id])
 
     utils.replace_cib_configuration(dom)
 
@@ -929,31 +951,11 @@ def resource_update(res_id,args):
             ia.setAttribute("value", val)
             instance_attributes.appendChild(ia)
 
-    meta_attributes = resource.getElementsByTagName("meta_attributes")
-    if len(meta_attributes) == 0:
-        meta_attributes = dom.createElement("meta_attributes")
-        meta_attributes.setAttribute("id", res_id + "-meta_attributes")
-        resource.appendChild(meta_attributes)
-    else:
-        meta_attributes = meta_attributes[0]
-
-    meta_attrs = utils.convert_args_to_tuples(meta_values)
-    for (key,val) in meta_attrs:
-        meta_found = False
-        for ma in meta_attributes.getElementsByTagName("nvpair"):
-            if ma.getAttribute("name") == key:
-                meta_found = True
-                if val == "":
-                    meta_attributes.removeChild(ma)
-                else:
-                    ma.setAttribute("value", val)
-                break
-        if not meta_found:
-            ma = dom.createElement("nvpair")
-            ma.setAttribute("id", res_id + "-meta_attributes-" + key)
-            ma.setAttribute("name", key)
-            ma.setAttribute("value", val)
-            meta_attributes.appendChild(ma)
+    remote_node_name = utils.dom_get_resource_remote_node_name(resource)
+    utils.dom_update_meta_attr(
+        resource,
+        utils.convert_args_to_tuples(meta_values)
+    )
 
     operations = resource.getElementsByTagName("operations")
     if len(operations) == 0:
@@ -1005,6 +1007,17 @@ def resource_update(res_id,args):
 
     utils.replace_cib_configuration(dom)
 
+    if (
+        remote_node_name
+        and
+        remote_node_name != utils.dom_get_resource_remote_node_name(resource)
+    ):
+        # if the resource was a remote node and it is not anymore (or its name
+        # changed), we need to tell pacemaker about it
+        output, retval = utils.run([
+            "crm_node", "--force", "--remove", remote_node_name
+        ])
+
     if "--wait" in utils.pcs_options:
         args = ["crm_resource", "--wait"]
         if wait_timeout:
@@ -1231,10 +1244,22 @@ def resource_meta(res_id, argv):
     if "--wait" in utils.pcs_options:
         wait_timeout = utils.validate_wait_get_timeout()
 
+    remote_node_name = utils.dom_get_resource_remote_node_name(resource_el)
     utils.dom_update_meta_attr(resource_el, utils.convert_args_to_tuples(argv))
 
     utils.replace_cib_configuration(dom)
 
+    if (
+        remote_node_name
+        and
+        remote_node_name != utils.dom_get_resource_remote_node_name(resource_el)
+    ):
+        # if the resource was a remote node and it is not anymore (or its name
+        # changed), we need to tell pacemaker about it
+        output, retval = utils.run([
+            "crm_node", "--force", "--remove", remote_node_name
+        ])
+
     if "--wait" in utils.pcs_options:
         args = ["crm_resource", "--wait"]
         if wait_timeout:
@@ -1618,45 +1643,9 @@ def resource_master_create(dom, argv, update=False, master_id=None):
 
     return dom, master_element.getAttribute("id")
 
-def resource_master_remove(argv):
-    if len(argv) < 1:
-        usage.resource()
-        sys.exit(1)
-
-    dom = utils.get_cib_dom()
-    master_id = argv.pop(0)
-
-    master_found = False
-# Check to see if there's a resource/group with the master_id if so, we remove the parent
-    for rg in (dom.getElementsByTagName("primitive") + dom.getElementsByTagName("group")):
-        if rg.getAttribute("id") == master_id and rg.parentNode.tagName == "master":
-            master_id = rg.parentNode.getAttribute("id")
-
-    resources_to_cleanup = []
-    for master in dom.getElementsByTagName("master"):
-        if master.getAttribute("id") == master_id:
-            childNodes = master.getElementsByTagName("primitive")
-            for child in childNodes:
-                resources_to_cleanup.append(child.getAttribute("id"))
-            master_found = True
-            break
-
-    if not master_found:
-        utils.err("Unable to find multi-state resource with id %s" % master_id)
-
-    constraints_element = dom.getElementsByTagName("constraints")
-    if len(constraints_element) > 0:
-        constraints_element = constraints_element[0]
-        for resource_id in resources_to_cleanup:
-            remove_resource_references(
-                dom, resource_id, constraints_element=constraints_element
-            )
-    master.parentNode.removeChild(master)
-    print("Removing Master - " + master_id)
-    utils.replace_cib_configuration(dom)
-
 def resource_remove(resource_id, output = True):
     dom = utils.get_cib_dom()
+    # if resource is a clone or a master, work with its child instead
     cloned_resource = utils.dom_get_clone_ms_resource(dom, resource_id)
     if cloned_resource:
         resource_id = cloned_resource.getAttribute("id")
@@ -1704,16 +1693,15 @@ def resource_remove(resource_id, output = True):
             resource_remove(res.getAttribute("id"))
         sys.exit(0)
 
+    # now we know resource is not a group, a clone nor a master
+    # because of the conditions above
+    if not utils.does_exist('//resources/descendant::primitive[@id="'+resource_id+'"]'):
+        utils.err("Resource '{0}' does not exist.".format(resource_id))
+
     group_xpath = '//group/primitive[@id="'+resource_id+'"]/..'
     group = utils.get_cib_xpath(group_xpath)
     num_resources_in_group = 0
 
-    if not utils.does_exist('//resources/descendant::primitive[@id="'+resource_id+'"]'):
-        if utils.does_exist('//resources/master[@id="'+resource_id+'"]'):
-            return resource_master_remove([resource_id])
-
-        utils.err("Resource '{0}' does not exist.".format(resource_id))
-
     if (group != ""):
         num_resources_in_group = len(parseString(group).documentElement.getElementsByTagName("primitive"))
 
@@ -1751,11 +1739,12 @@ def resource_remove(resource_id, output = True):
     )
     dom = utils.get_cib_dom()
     resource_el = utils.dom_get_resource(dom, resource_id)
+    remote_node_name = None
     if resource_el:
-        remote_node = utils.dom_get_resource_remote_node_name(resource_el)
-        if remote_node:
+        remote_node_name = utils.dom_get_resource_remote_node_name(resource_el)
+        if remote_node_name:
             dom = constraint.remove_constraints_containing_node(
-                dom, remote_node, output
+                dom, remote_node_name, output
             )
             utils.replace_cib_configuration(dom)
             dom = utils.get_cib_dom()
@@ -1821,6 +1810,10 @@ def resource_remove(resource_id, output = True):
             if output == True:
                 utils.err("Unable to remove resource '%s' (do constraints exist?)" % (resource_id))
             return False
+    if remote_node_name and not utils.usefile:
+        output, retval = utils.run([
+            "crm_node", "--force", "--remove", remote_node_name
+        ])
     return True
 
 def stonith_level_rm_device(cib_dom, stn_id):
@@ -2017,6 +2010,17 @@ def resource_group_list(argv):
         print(" ".join(line_parts))
 
 def resource_show(argv, stonith=False):
+    mutually_exclusive_opts = ("--full", "--groups", "--hide-inactive")
+    modifiers = [
+        key for key in utils.pcs_options if key in mutually_exclusive_opts
+    ]
+    if (len(modifiers) > 1) or (argv and modifiers):
+        utils.err(
+            "you can specify only one of resource id, {0}".format(
+                ", ".join(mutually_exclusive_opts)
+            )
+        )
+
     if "--groups" in utils.pcs_options:
         resource_group_list(argv)
         return
@@ -2033,15 +2037,28 @@ def resource_show(argv, stonith=False):
         return
 
     if len(argv) == 0:
-        output, retval = utils.run(["crm_mon", "-1", "-r"])
+        monitor_command = ["crm_mon", "--one-shot"]
+        if "--hide-inactive" not in utils.pcs_options:
+            monitor_command.append('--inactive')
+        output, retval = utils.run(monitor_command)
         if retval != 0:
             utils.err("unable to get cluster status from crm_mon\n"+output.rstrip())
         preg = re.compile(r'.*(stonith:.*)')
         resources_header = False
         in_resources = False
         has_resources = False
+        no_resources_line = (
+            "NO stonith devices configured" if stonith
+            else "NO resources configured"
+        )
         for line in output.split('\n'):
-            if line == "Full list of resources:":
+            if line == "No active resources":
+                print(line)
+                return
+            if line == "No resources":
+                print(no_resources_line)
+                return
+            if line in ("Full list of resources:", "Active resources:"):
                 resources_header = True
                 continue
             if line == "":
@@ -2050,10 +2067,7 @@ def resource_show(argv, stonith=False):
                     in_resources = True
                 elif in_resources:
                     if not has_resources:
-                        if not stonith:
-                            print("NO resources configured")
-                        else:
-                            print("NO stonith devices configured")
+                        print(no_resources_line)
                     return
                 continue
             if in_resources:
@@ -2444,7 +2458,7 @@ def set_default(def_type, argv):
         if (len(args) != 2):
             print("Invalid Property: " + arg)
             continue
-        utils.setAttribute(def_type, args[0], args[1])
+        utils.setAttribute(def_type, args[0], args[1], exit_on_error=True)
 
 def print_node(node, tab = 0):
     spaces = " " * tab
@@ -2798,8 +2812,7 @@ def set_resource_utilization(resource_id, argv):
     resource_el = utils.dom_get_resource(cib, resource_id)
     if resource_el is None:
         utils.err("Unable to find a resource: {0}".format(resource_id))
-
-    utils.dom_update_utilization(resource_el, utils.convert_args_to_tuples(argv))
+    utils.dom_update_utilization(resource_el, prepare_options(argv))
     utils.replace_cib_configuration(cib)
 
 def print_resource_utilization(resource_id):
diff --git a/pcs/settings.py.debian b/pcs/settings.py.debian
index a49e123..90ef51d 100644
--- a/pcs/settings.py.debian
+++ b/pcs/settings.py.debian
@@ -4,3 +4,4 @@ crmd_binary = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/crmd"
 cib_binary = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/cib"
 stonithd_binary = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/stonithd"
 pcsd_exec_location = "/usr/share/pcsd/"
+sbd_config = "/etc/default/sbd"
diff --git a/pcs/settings_default.py b/pcs/settings_default.py
index 89b4d0e..84eeacc 100644
--- a/pcs/settings_default.py
+++ b/pcs/settings_default.py
@@ -22,7 +22,7 @@ pengine_binary = "/usr/libexec/pacemaker/pengine"
 crmd_binary = "/usr/libexec/pacemaker/crmd"
 cib_binary = "/usr/libexec/pacemaker/cib"
 stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.153"
+pcs_version = "0.9.154"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
 crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
@@ -41,3 +41,5 @@ nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata/"
 sbd_watchdog_default = "/dev/watchdog"
 sbd_config = "/etc/sysconfig/sbd"
 pacemaker_wait_timeout_status = 62
+booth_config_dir = "/etc/booth"
+booth_binary = "/usr/sbin/booth"
diff --git a/pcs/status.py b/pcs/status.py
index 0e5e0e7..86216ea 100644
--- a/pcs/status.py
+++ b/pcs/status.py
@@ -13,6 +13,9 @@ from pcs import (
     usage,
     utils,
 )
+from pcs.qdevice import qdevice_status_cmd
+from pcs.quorum import quorum_status_cmd
+from pcs.cli.common.errors import CmdLineInputError
 from pcs.lib.errors import LibraryError
 from pcs.lib.pacemaker_state import ClusterState
 
@@ -38,6 +41,28 @@ def status_cmd(argv):
         xml_status()
     elif (sub_cmd == "corosync"):
         corosync_status()
+    elif sub_cmd == "qdevice":
+        try:
+            qdevice_status_cmd(
+                utils.get_library_wrapper(),
+                argv,
+                utils.get_modificators()
+            )
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
+        except CmdLineInputError as e:
+            utils.exit_on_cmdline_input_errror(e, "status", sub_cmd)
+    elif sub_cmd == "quorum":
+        try:
+            quorum_status_cmd(
+                utils.get_library_wrapper(),
+                argv,
+                utils.get_modificators()
+            )
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
+        except CmdLineInputError as e:
+            utils.exit_on_cmdline_input_errror(e, "status", sub_cmd)
     else:
         usage.status()
         sys.exit(1)
@@ -66,13 +91,19 @@ def full_status():
     if utils.stonithCheck():
         print("WARNING: no stonith devices and stonith-enabled is not false")
 
-    if not utils.is_rhel6() and utils.corosyncPacemakerNodeCheck():
+    if (
+        not utils.usefile
+        and
+        not utils.is_rhel6()
+        and
+        utils.corosyncPacemakerNodeCheck()
+    ):
         print("WARNING: corosync and pacemaker node names do not match (IPs used in setup?)")
 
     print(output)
 
     if not utils.usefile:
-        if  "--full" in utils.pcs_options:
+        if  "--full" in utils.pcs_options and utils.hasCorosyncConf():
             print_pcsd_daemon_status()
             print()
         utils.serviceStatus("  ")
@@ -90,7 +121,10 @@ def nodes_status(argv):
         return
 
     if len(argv) == 1 and (argv[0] == "config"):
-        corosync_nodes = utils.getNodesFromCorosyncConf()
+        if utils.hasCorosyncConf():
+            corosync_nodes = utils.getNodesFromCorosyncConf()
+        else:
+            corosync_nodes = []
         try:
             pacemaker_nodes = sorted([
                 node.attrs.name for node
@@ -213,7 +247,7 @@ def cluster_status(argv):
         else:
             print("",line)
 
-    if not utils.usefile:
+    if not utils.usefile and utils.hasCorosyncConf():
         print()
         print_pcsd_daemon_status()
 
@@ -231,25 +265,11 @@ def xml_status():
         utils.err("running crm_mon, is pacemaker running?")
     print(output, end="")
 
-def is_cman_running():
-    if utils.is_systemctl():
-        dummy_output, retval = utils.run(["systemctl", "status", "cman.service"])
-    else:
-        dummy_output, retval = utils.run(["service", "cman", "status"])
-    return retval == 0
-
-def is_corosyc_running():
-    if utils.is_systemctl():
-        dummy_output, retval = utils.run(["systemctl", "status", "corosync.service"])
-    else:
-        dummy_output, retval = utils.run(["service", "corosync", "status"])
-    return retval == 0
-
-def is_pacemaker_running():
+def is_service_running(service):
     if utils.is_systemctl():
-        dummy_output, retval = utils.run(["systemctl", "status", "pacemaker.service"])
+        dummy_output, retval = utils.run(["systemctl", "status", service])
     else:
-        dummy_output, retval = utils.run(["service", "pacemaker", "status"])
+        dummy_output, retval = utils.run(["service", service, "status"])
     return retval == 0
 
 def print_pcsd_daemon_status():
diff --git a/pcs/stonith.py b/pcs/stonith.py
index ab9e926..0942979 100644
--- a/pcs/stonith.py
+++ b/pcs/stonith.py
@@ -130,7 +130,9 @@ def stonith_list_options(stonith_agent):
         metadata = lib_ra.get_fence_agent_metadata(runner, stonith_agent)
         desc = lib_ra.get_agent_desc(metadata)
         params = lib_ra.get_fence_agent_parameters(runner, metadata)
-        resource.resource_print_options(stonith_agent, desc, params)
+        # Fence agents just list the actions, usually without any attributes.
+        # We could print them but it wouldn't add any useful information.
+        resource.resource_print_options(stonith_agent, desc, params, actions=[])
     except lib_ra.ResourceAgentLibError as e:
         utils.process_library_reports(
             [lib_ra.resource_agent_lib_error_to_report_item(e)]
@@ -174,7 +176,8 @@ def stonith_create(argv):
         utils.process_library_reports(e.args)
 
     resource.resource_create(
-        stonith_id, "stonith:" + stonith_type, st_values, op_values, meta_values
+        stonith_id, "stonith:" + stonith_type, st_values, op_values, meta_values,
+        group=utils.pcs_options.get("--group", None)
     )
 
 def stonith_level(argv):
@@ -225,7 +228,11 @@ def stonith_level_add(level, node, devices):
         for dev in devices.split(","):
             if not utils.is_stonith_resource(dev):
                 utils.err("%s is not a stonith id (use --force to override)" % dev)
-        if not utils.is_pacemaker_node(node) and not utils.is_corosync_node(node):
+        corosync_nodes = []
+        if utils.hasCorosyncConf():
+            corosync_nodes = utils.getNodesFromCorosyncConf()
+        pacemaker_nodes = utils.getNodesFromPacemaker()
+        if node not in corosync_nodes and node not in pacemaker_nodes:
             utils.err("%s is not currently a node (use --force to override)" % node)
 
     ft = dom.getElementsByTagName("fencing-topology")
@@ -321,6 +328,10 @@ def stonith_level_clear(node = None):
 
 def stonith_level_verify():
     dom = utils.get_cib_dom()
+    corosync_nodes = []
+    if utils.hasCorosyncConf():
+        corosync_nodes = utils.getNodesFromCorosyncConf()
+    pacemaker_nodes = utils.getNodesFromPacemaker()
 
     fls = dom.getElementsByTagName("fencing-level")
     for fl in fls:
@@ -329,7 +340,7 @@ def stonith_level_verify():
         for dev in devices.split(","):
             if not utils.is_stonith_resource(dev):
                 utils.err("%s is not a stonith id" % dev)
-        if not utils.is_corosync_node(node) and not utils.is_pacemaker_node(node):
+        if node not in corosync_nodes and node not in pacemaker_nodes:
             utils.err("%s is not currently a node" % node)
 
 def stonith_level_show():
@@ -486,7 +497,7 @@ def _sbd_parse_watchdogs(watchdog_list):
     for watchdog_node in watchdog_list:
         if "@" not in watchdog_node:
             if default_watchdog:
-                raise CmdLineInputError("Multiple default watchdogs.")
+                raise CmdLineInputError("Multiple watchdog definitions.")
             default_watchdog = watchdog_node
         else:
             watchdog, node_name = watchdog_node.rsplit("@", 1)
@@ -544,7 +555,7 @@ def sbd_config(lib, argv, modifiers):
 
     config = config_list[0]["config"]
 
-    filtered_options = ["SBD_WATCHDOG_DEV", "SBD_OPTS"]
+    filtered_options = ["SBD_WATCHDOG_DEV", "SBD_OPTS", "SBD_PACEMAKER"]
     for key, val in config.items():
         if key in filtered_options:
             continue
diff --git a/pcs/test/resources/.gitignore b/pcs/test/resources/.gitignore
index 8c710cf..b0434e7 100644
--- a/pcs/test/resources/.gitignore
+++ b/pcs/test/resources/.gitignore
@@ -1,2 +1,3 @@
 *.tmp
 temp*.xml
+temp-*
diff --git a/pcs/test/resources/corosync-qdevice.conf b/pcs/test/resources/corosync-qdevice.conf
new file mode 100644
index 0000000..38998e7
--- /dev/null
+++ b/pcs/test/resources/corosync-qdevice.conf
@@ -0,0 +1,34 @@
+totem {
+    version: 2
+    secauth: off
+    cluster_name: test99
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+
+    device {
+        model: net
+
+        net {
+            host: 127.0.0.1
+        }
+    }
+}
+
+logging {
+    to_syslog: yes
+}
diff --git a/pcs/test/resources/tmp_keyfile b/pcs/test/resources/tmp_keyfile
new file mode 100644
index 0000000..6b584e8
--- /dev/null
+++ b/pcs/test/resources/tmp_keyfile
@@ -0,0 +1 @@
+content
\ No newline at end of file
diff --git a/pcs/test/suite.py b/pcs/test/suite.py
index 5b29918..b6c7be2 100755
--- a/pcs/test/suite.py
+++ b/pcs/test/suite.py
@@ -9,19 +9,12 @@ from __future__ import (
 import sys
 import os.path
 
-major, minor = sys.version_info[:2]
-if major == 2 and minor == 6:
-    import unittest2 as unittest
-else:
-    import unittest
-
-
 PACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
     os.path.abspath(__file__)
 )))
+sys.path.insert(0, PACKAGE_DIR)
 
-def put_package_to_path():
-    sys.path.insert(0, PACKAGE_DIR)
+from pcs.test.tools import pcs_unittest as unittest
 
 def prepare_test_name(test_name):
     """
@@ -65,18 +58,17 @@ def discover_tests(explicitly_enumerated_tests, exclude_enumerated_tests=False):
     return unittest.TestLoader().loadTestsFromNames(explicitly_enumerated_tests)
 
 def run_tests(tests, verbose=False, color=False):
-    resultclass = unittest.runner.TextTestResult
+    resultclass = unittest.TextTestResult
     if color:
         from pcs.test.tools.color_text_runner import ColorTextTestResult
         resultclass = ColorTextTestResult
 
-    testRunner = unittest.runner.TextTestRunner(
+    testRunner = unittest.TextTestRunner(
         verbosity=2 if verbose else 1,
         resultclass=resultclass
     )
     return testRunner.run(tests)
 
-put_package_to_path()
 explicitly_enumerated_tests = [
     prepare_test_name(arg) for arg in sys.argv[1:] if arg not in (
         "-v",
diff --git a/pcs/test/test_acl.py b/pcs/test/test_acl.py
index b053614..186c035 100644
--- a/pcs/test/test_acl.py
+++ b/pcs/test/test_acl.py
@@ -6,7 +6,7 @@ from __future__ import (
 )
 
 import shutil
-import unittest
+from pcs.test.tools import pcs_unittest as unittest
 
 from pcs.test.tools.assertions import AssertPcsMixin
 from pcs.test.tools.misc import (
diff --git a/pcs/test/test_alert.py b/pcs/test/test_alert.py
index 905dc9f..d919ff6 100644
--- a/pcs/test/test_alert.py
+++ b/pcs/test/test_alert.py
@@ -7,7 +7,6 @@ from __future__ import (
 )
 
 import shutil
-import sys
 
 from pcs.test.tools.misc import (
     get_test_resource as rc,
@@ -15,12 +14,7 @@ from pcs.test.tools.misc import (
 )
 from pcs.test.tools.assertions import AssertPcsMixin
 from pcs.test.tools.pcs_runner import PcsRunner
-
-major, minor = sys.version_info[:2]
-if major == 2 and minor == 6:
-    import unittest2 as unittest
-else:
-    import unittest
+from pcs.test.tools import pcs_unittest as unittest
 
 
 old_cib = rc("cib-empty.xml")
@@ -239,19 +233,19 @@ Alerts:
  Alert: alert (path=test)
 """
         )
-        self.assert_pcs_success("alert recipient add alert rec_value")
+        self.assert_pcs_success("alert recipient add alert value=rec_value")
         self.assert_pcs_success(
             "alert config",
             """\
 Alerts:
  Alert: alert (path=test)
   Recipients:
-   Recipient: rec_value
+   Recipient: alert-recipient (value=rec_value)
 """
         )
         self.assert_pcs_success(
-            "alert recipient add alert rec_value2 description=description "
-            "options o1=1 o2=2 meta m1=v1 m2=v2"
+            "alert recipient add alert value=rec_value2 id=my-recipient "
+            "description=description options o1=1 o2=2 meta m1=v1 m2=v2"
         )
         self.assert_pcs_success(
             "alert config",
@@ -259,35 +253,77 @@ Alerts:
 Alerts:
  Alert: alert (path=test)
   Recipients:
-   Recipient: rec_value
-   Recipient: rec_value2
+   Recipient: alert-recipient (value=rec_value)
+   Recipient: my-recipient (value=rec_value2)
     Description: description
     Options: o1=1 o2=2
     Meta options: m1=v1 m2=v2
 """
         )
 
-    def test_no_alert(self):
+    def test_already_exists(self):
+        self.assert_pcs_success("alert create path=test")
+        self.assert_pcs_success(
+            "alert recipient add alert value=rec_value id=rec"
+        )
         self.assert_pcs_fail(
-            "alert recipient add alert rec_value",
-            "Error: Alert 'alert' not found.\n"
+            "alert recipient add alert value=value id=rec",
+            "Error: 'rec' already exists\n"
+        )
+        self.assert_pcs_fail(
+            "alert recipient add alert value=value id=alert",
+            "Error: 'alert' already exists\n"
         )
 
-    def test_already_exists(self):
+    def test_same_value(self):
         self.assert_pcs_success("alert create path=test")
-        self.assert_pcs_success("alert recipient add alert rec_value")
+        self.assert_pcs_success(
+            "alert recipient add alert value=rec_value id=rec"
+        )
         self.assert_pcs_fail(
-            "alert recipient add alert rec_value",
-            "Error: Recipient 'rec_value' in alert 'alert' already exists.\n"
+            "alert recipient add alert value=rec_value",
+            "Error: Recipient 'rec_value' in alert 'alert' already exists, "
+            "use --force to override\n"
+        )
+        self.assert_pcs_success(
+            "alert config",
+            """\
+Alerts:
+ Alert: alert (path=test)
+  Recipients:
+   Recipient: rec (value=rec_value)
+"""
+        )
+        self.assert_pcs_success(
+            "alert recipient add alert value=rec_value --force",
+            "Warning: Recipient 'rec_value' in alert 'alert' already exists\n"
+        )
+        self.assert_pcs_success(
+            "alert config",
+            """\
+Alerts:
+ Alert: alert (path=test)
+  Recipients:
+   Recipient: rec (value=rec_value)
+   Recipient: alert-recipient (value=rec_value)
+"""
+        )
+
+    def test_no_value(self):
+        self.assert_pcs_success("alert create path=test")
+        self.assert_pcs_fail(
+            "alert recipient add alert id=rec",
+            "Error: required option 'value' is missing\n"
         )
 
 
+
 @unittest.skipUnless(ALERTS_SUPPORTED, ALERTS_NOT_SUPPORTED_MSG)
 class UpdateRecipientAlert(PcsAlertTest):
     def test_success(self):
         self.assert_pcs_success("alert create path=test")
         self.assert_pcs_success(
-            "alert recipient add alert rec_value description=description "
+            "alert recipient add alert value=rec_value description=description "
             "options o1=1 o2=2 meta m1=v1 m2=v2"
         )
         self.assert_pcs_success(
@@ -296,14 +332,14 @@ class UpdateRecipientAlert(PcsAlertTest):
 Alerts:
  Alert: alert (path=test)
   Recipients:
-   Recipient: rec_value
+   Recipient: alert-recipient (value=rec_value)
     Description: description
     Options: o1=1 o2=2
     Meta options: m1=v1 m2=v2
 """
         )
         self.assert_pcs_success(
-            "alert recipient update alert rec_value description=desc "
+            "alert recipient update alert-recipient value=new description=desc "
             "options o1= o2=v2 o3=3 meta m1= m2=2 m3=3"
         )
         self.assert_pcs_success(
@@ -312,24 +348,101 @@ Alerts:
 Alerts:
  Alert: alert (path=test)
   Recipients:
-   Recipient: rec_value
+   Recipient: alert-recipient (value=new)
+    Description: desc
+    Options: o2=v2 o3=3
+    Meta options: m2=2 m3=3
+"""
+        )
+        self.assert_pcs_success(
+            "alert recipient update alert-recipient value=new"
+        )
+        self.assert_pcs_success(
+            "alert config",
+            """\
+Alerts:
+ Alert: alert (path=test)
+  Recipients:
+   Recipient: alert-recipient (value=new)
     Description: desc
     Options: o2=v2 o3=3
     Meta options: m2=2 m3=3
 """
         )
 
-    def test_no_alert(self):
+    def test_value_exists(self):
+        self.assert_pcs_success("alert create path=test")
+        self.assert_pcs_success("alert recipient add alert value=rec_value")
+        self.assert_pcs_success("alert recipient add alert value=value")
+        self.assert_pcs_success(
+            "alert config",
+            """\
+Alerts:
+ Alert: alert (path=test)
+  Recipients:
+   Recipient: alert-recipient (value=rec_value)
+   Recipient: alert-recipient-1 (value=value)
+"""
+        )
         self.assert_pcs_fail(
-            "alert recipient update alert rec_value description=desc",
-            "Error: Alert 'alert' not found.\n"
+            "alert recipient update alert-recipient value=value",
+            "Error: Recipient 'value' in alert 'alert' already exists, "
+            "use --force to override\n"
+        )
+        self.assert_pcs_success(
+            "alert recipient update alert-recipient value=value --force",
+            "Warning: Recipient 'value' in alert 'alert' already exists\n"
+        )
+        self.assert_pcs_success(
+            "alert config",
+            """\
+Alerts:
+ Alert: alert (path=test)
+  Recipients:
+   Recipient: alert-recipient (value=value)
+   Recipient: alert-recipient-1 (value=value)
+"""
+        )
+
+    def test_value_same_as_previous(self):
+        self.assert_pcs_success("alert create path=test")
+        self.assert_pcs_success("alert recipient add alert value=rec_value")
+        self.assert_pcs_success(
+            "alert config",
+            """\
+Alerts:
+ Alert: alert (path=test)
+  Recipients:
+   Recipient: alert-recipient (value=rec_value)
+"""
+        )
+        self.assert_pcs_success(
+            "alert recipient update alert-recipient value=rec_value"
+        )
+        self.assert_pcs_success(
+            "alert config",
+            """\
+Alerts:
+ Alert: alert (path=test)
+  Recipients:
+   Recipient: alert-recipient (value=rec_value)
+"""
         )
 
     def test_no_recipient(self):
+        self.assert_pcs_fail(
+            "alert recipient update rec description=desc",
+            "Error: Recipient 'rec' does not exist\n"
+        )
+
+    def test_empty_value(self):
         self.assert_pcs_success("alert create path=test")
+        self.assert_pcs_success(
+            "alert recipient add alert value=rec_value id=rec"
+        )
         self.assert_pcs_fail(
-            "alert recipient update alert rec_value description=desc",
-            "Error: Recipient 'rec_value' not found in alert 'alert'.\n"
+            "alert recipient update rec value=",
+            "Error: Recipient value '' is not valid.\n"
         )
 
 
@@ -337,27 +450,29 @@ Alerts:
 class RemoveRecipientTest(PcsAlertTest):
     def test_success(self):
         self.assert_pcs_success("alert create path=test")
-        self.assert_pcs_success("alert recipient add alert rec_value")
+        self.assert_pcs_success(
+            "alert recipient add alert value=rec_value id=rec"
+        )
         self.assert_pcs_success(
             "alert config",
             """\
 Alerts:
  Alert: alert (path=test)
   Recipients:
-   Recipient: rec_value
+   Recipient: rec (value=rec_value)
 """
         )
-        self.assert_pcs_success("alert recipient remove alert rec_value")
-
-    def test_no_alert(self):
-        self.assert_pcs_fail(
-            "alert recipient remove alert rec_value",
-            "Error: Alert 'alert' not found.\n"
+        self.assert_pcs_success("alert recipient remove rec")
+        self.assert_pcs_success(
+            "alert config",
+            """\
+Alerts:
+ Alert: alert (path=test)
+"""
         )
 
     def test_no_recipient(self):
-        self.assert_pcs_success("alert create path=test")
         self.assert_pcs_fail(
-            "alert recipient remove alert rec_value",
-            "Error: Recipient 'rec_value' not found in alert 'alert'.\n"
+            "alert recipient remove rec",
+            "Error: Recipient 'rec' does not exist\n"
         )
diff --git a/pcs/test/test_booth.py b/pcs/test/test_booth.py
new file mode 100644
index 0000000..c12391b
--- /dev/null
+++ b/pcs/test/test_booth.py
@@ -0,0 +1,420 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os
+import shutil
+
+from pcs.test.tools import pcs_unittest as unittest
+from pcs.test.tools.assertions import AssertPcsMixin, console_report
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_runner import PcsRunner
+from pcs import settings
+
+
+EMPTY_CIB = rc("cib-empty.xml")
+TEMP_CIB = rc("temp-cib.xml")
+
+BOOTH_CONFIG_FILE = rc("temp-booth.cfg")
+BOOTH_KEY_FILE = rc("temp-booth.key")
+
+BOOTH_RESOURCE_AGENT_INSTALLED = "booth-site" in os.listdir(
+    os.path.join(settings.ocf_resources, "pacemaker")
+)
+need_booth_resource_agent = unittest.skipUnless(
+    BOOTH_RESOURCE_AGENT_INSTALLED,
+    "test requires resource agent ocf:pacemaker:booth-site"
+    " which is not istalled"
+)
+
+
+def fake_file(command):
+    return "{0} --booth-conf={1} --booth-key={2}".format(
+        command,
+        BOOTH_CONFIG_FILE,
+        BOOTH_KEY_FILE,
+    )
+
+def ensure_booth_config_exists():
+    if not os.path.exists(BOOTH_CONFIG_FILE):
+        with open(BOOTH_CONFIG_FILE, "w") as config_file:
+            config_file.write("")
+
+def ensure_booth_config_not_exists():
+    if os.path.exists(BOOTH_CONFIG_FILE):
+        os.remove(BOOTH_CONFIG_FILE)
+    if os.path.exists(BOOTH_KEY_FILE):
+        os.remove(BOOTH_KEY_FILE)
+
+class BoothMixin(AssertPcsMixin):
+    def setUp(self):
+        shutil.copy(EMPTY_CIB, TEMP_CIB)
+        self.pcs_runner = PcsRunner(TEMP_CIB)
+
+    def assert_pcs_success(self, command, *args, **kwargs):
+        return super(BoothMixin, self).assert_pcs_success(
+            fake_file(command), *args, **kwargs
+        )
+
+    def assert_pcs_fail(self, command, *args, **kwargs):
+        return super(BoothMixin, self).assert_pcs_fail(
+            fake_file(command), *args, **kwargs
+        )
+
+    def assert_pcs_fail_original(self, *args, **kwargs):
+        return super(BoothMixin, self).assert_pcs_fail(*args, **kwargs)
+
+class SetupTest(BoothMixin, unittest.TestCase):
+    def test_success_setup_booth_config(self):
+        ensure_booth_config_not_exists()
+        self.assert_pcs_success(
+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3"
+        )
+        self.assert_pcs_success(
+            "booth config",
+            stdout_full=console_report(
+                "authfile = {0}".format(BOOTH_KEY_FILE),
+                "site = 1.1.1.1",
+                "site = 2.2.2.2",
+                "arbitrator = 3.3.3.3",
+            )
+        )
+        with open(BOOTH_KEY_FILE) as key_file:
+            self.assertEqual(64, len(key_file.read()))
+
+
+    def test_fail_when_config_exists_already(self):
+        ensure_booth_config_exists()
+        try:
+            self.assert_pcs_fail(
+                "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3",
+                (
+                    "Error: booth config file {0} already exists, use --force"
+                    " to override\n"
+                ).format(BOOTH_CONFIG_FILE)
+            )
+        finally:
+            if os.path.exists(BOOTH_CONFIG_FILE):
+                os.remove(BOOTH_CONFIG_FILE)
+
+    def test_warn_when_config_file_exists_already_but_is_forced(self):
+        ensure_booth_config_exists()
+        self.assert_pcs_success(
+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3 --force",
+            stdout_full=[
+                "Warning: booth config file"
+                    " {0} already exists".format(BOOTH_CONFIG_FILE)
+                ,
+                "Warning: booth key file"
+                    " {0} already exists".format(BOOTH_KEY_FILE)
+                ,
+            ]
+        )
+        ensure_booth_config_not_exists()
+
+
+    def test_fail_on_multiple_reasons(self):
+        self.assert_pcs_fail(
+            "booth setup sites 1.1.1.1 arbitrators 1.1.1.1 2.2.2.2 3.3.3.3",
+            console_report(
+                "Error: lack of sites for booth configuration (need 2 at least)"
+                    ": sites 1.1.1.1"
+                ,
+                "Error: odd number of peers is required (entered 4 peers)",
+                "Error: duplicate address for booth configuration: 1.1.1.1",
+            )
+        )
+
+    def test_refuse_partially_mocked_environment(self):
+        self.assert_pcs_fail_original(
+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3"
+                " --booth-conf=/some/file" #no --booth-key!
+            ,
+            "Error: With --booth-conf must be specified --booth-key as well\n"
+        )
+        self.assert_pcs_fail_original(
+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3"
+                " --booth-key=/some/file" #no --booth-conf!
+            ,
+            "Error: With --booth-key must be specified --booth-conf as well\n"
+        )
+
+    def test_show_usage_when_no_site_specified(self):
+        self.assert_pcs_fail("booth setup arbitrators 3.3.3.3", stdout_start=[
+            "",
+            "Usage: pcs booth <command>"
+        ])
+
+
+class DestroyTest(BoothMixin, unittest.TestCase):
+    def test_failed_when_using_mocked_booth_env(self):
+        self.assert_pcs_fail(
+            "booth destroy",
+            "Error: This command does not support --booth-conf, --booth-key\n"
+        )
+
+    @need_booth_resource_agent
+    def test_failed_when_booth_in_cib(self):
+        ensure_booth_config_not_exists()
+        name = " --name=some-weird-booth-name"
+        self.assert_pcs_success(
+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3" + name
+        )
+        self.assert_pcs_success("booth create ip 1.1.1.1" + name)
+        self.assert_pcs_fail_original(
+            "booth destroy" + name,
+            #If there is booth at some-weird-booth-name in systemd (enabled or
+            #started) the message continue with it because destroy command works
+            #always on live environment. "Cleaner" solution takes more effort
+            #than what it's worth
+            stdout_start=(
+                "Error: booth instance 'some-weird-booth-name' is used in"
+                " cluster resource\n"
+            ),
+        )
+
+class BoothTest(unittest.TestCase, BoothMixin):
+    def setUp(self):
+        shutil.copy(EMPTY_CIB, TEMP_CIB)
+        self.pcs_runner = PcsRunner(TEMP_CIB)
+        ensure_booth_config_not_exists()
+        self.assert_pcs_success(
+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3"
+        )
+
+class AddTicketTest(BoothTest):
+    def test_success_add_ticket(self):
+        self.assert_pcs_success("booth ticket add TicketA expire=10")
+        self.assert_pcs_success("booth config", stdout_full=console_report(
+            "authfile = {0}".format(BOOTH_KEY_FILE),
+            "site = 1.1.1.1",
+            "site = 2.2.2.2",
+            "arbitrator = 3.3.3.3",
+            'ticket = "TicketA"',
+            "  expire = 10",
+        ))
+
+    def test_fail_on_bad_ticket_name(self):
+        self.assert_pcs_fail(
+            "booth ticket add @TicketA",
+            "Error: booth ticket name '@TicketA' is not valid, use alphanumeric"
+            " chars or dash\n"
+        )
+
+    def test_fail_on_duplicate_ticket_name(self):
+        self.assert_pcs_success("booth ticket add TicketA")
+        self.assert_pcs_fail(
+            "booth ticket add TicketA",
+            "Error: booth ticket name 'TicketA' already exists in configuration"
+            "\n"
+        )
+
+    def test_fail_on_invalid_options(self):
+        self.assert_pcs_fail(
+            "booth ticket add TicketA site=a timeout=", console_report(
+                "Error: invalid booth ticket option 'site', allowed options"
+                    " are: acquire-after, attr-prereq, before-acquire-handler,"
+                    " expire, renewal-freq, retries, timeout, weights"
+                ,
+                "Error: '' is not a valid timeout value, use no-empty",
+            )
+        )
+
+    def test_forceable_fail_on_unknown_options(self):
+        msg = (
+            "invalid booth ticket option 'unknown', allowed options"
+            " are: acquire-after, attr-prereq, before-acquire-handler,"
+            " expire, renewal-freq, retries, timeout, weights"
+        )
+        self.assert_pcs_fail(
+            "booth ticket add TicketA unknown=a", console_report(
+                "Error: "+msg+", use --force to override",
+            )
+        )
+        self.assert_pcs_success(
+            "booth ticket add TicketA unknown=a --force",
+            "Warning: {0}\n".format(msg),
+        )
+
+
+class RemoveTicketTest(BoothTest):
+    def test_success_remove_ticket(self):
+        self.assert_pcs_success("booth ticket add TicketA")
+        self.assert_pcs_success("booth config", stdout_full=console_report(
+            "authfile = {0}".format(BOOTH_KEY_FILE),
+            "site = 1.1.1.1",
+            "site = 2.2.2.2",
+            "arbitrator = 3.3.3.3",
+            'ticket = "TicketA"',
+        ))
+        self.assert_pcs_success("booth ticket remove TicketA")
+        self.assert_pcs_success("booth config", stdout_full=console_report(
+            "authfile = {0}".format(BOOTH_KEY_FILE),
+            "site = 1.1.1.1",
+            "site = 2.2.2.2",
+            "arbitrator = 3.3.3.3",
+        ))
+
+    def test_fail_when_ticket_does_not_exist(self):
+        self.assert_pcs_fail(
+            "booth ticket remove TicketA",
+            "Error: booth ticket name 'TicketA' does not exist\n"
+        )
+
+ at need_booth_resource_agent
+class CreateTest(BoothTest):
+    def test_successfully_create_booth_resource_group(self):
+        self.assert_pcs_success("resource show", "NO resources configured\n")
+        self.assert_pcs_success("booth create ip 192.168.122.120")
+        self.assert_pcs_success("resource show", [
+             " Resource Group: booth-booth-group",
+             "     booth-booth-ip	(ocf::heartbeat:IPaddr2):	Stopped",
+             "     booth-booth-service	(ocf::pacemaker:booth-site):	Stopped",
+        ])
+        self.assert_pcs_success("resource show booth-booth-ip", [
+             " Resource: booth-booth-ip (class=ocf provider=heartbeat type=IPaddr2)",
+             "  Attributes: ip=192.168.122.120",
+             "  Operations: start interval=0s timeout=20s (booth-booth-ip-start-interval-0s)",
+             "              stop interval=0s timeout=20s (booth-booth-ip-stop-interval-0s)",
+             "              monitor interval=10s timeout=20s (booth-booth-ip-monitor-interval-10s)",
+        ])
+
+    def test_refuse_create_booth_when_config_is_already_in_use(self):
+        self.assert_pcs_success("booth create ip 192.168.122.120")
+        self.assert_pcs_fail("booth create ip 192.168.122.121", [
+            "Error: booth instance 'booth' is already created as cluster"
+                " resource"
+        ])
+
+ at need_booth_resource_agent
+class RemoveTest(BoothTest):
+    def test_failed_when_no_booth_configuration_created(self):
+        self.assert_pcs_success("resource show", "NO resources configured\n")
+        self.assert_pcs_fail("booth remove", [
+            "Error: booth instance 'booth' not found in cib"
+        ])
+
+    def test_failed_when_multiple_booth_configuration_created(self):
+        self.assert_pcs_success("resource show", "NO resources configured\n")
+        self.assert_pcs_success("booth create ip 192.168.122.120")
+        self.assert_pcs_success(
+            "resource create some-id ocf:pacemaker:booth-site"
+            " config=/etc/booth/booth.conf"
+        )
+        self.assert_pcs_success("resource show", [
+             " Resource Group: booth-booth-group",
+             "     booth-booth-ip	(ocf::heartbeat:IPaddr2):	Stopped",
+             "     booth-booth-service	(ocf::pacemaker:booth-site):	Stopped",
+             " some-id	(ocf::pacemaker:booth-site):	Stopped",
+        ])
+        self.assert_pcs_fail("booth remove", [
+            "Error: found more than one booth instance 'booth' in cib, use"
+            " --force to override"
+        ])
+
+    def test_remove_added_booth_configuration(self):
+        self.assert_pcs_success("resource show", "NO resources configured\n")
+        self.assert_pcs_success("booth create ip 192.168.122.120")
+        self.assert_pcs_success("resource show", [
+             " Resource Group: booth-booth-group",
+             "     booth-booth-ip	(ocf::heartbeat:IPaddr2):	Stopped",
+             "     booth-booth-service	(ocf::pacemaker:booth-site):	Stopped",
+        ])
+        self.assert_pcs_success("booth remove", [
+            "Deleting Resource - booth-booth-ip",
+            "Deleting Resource (and group) - booth-booth-service",
+        ])
+        self.assert_pcs_success("resource show", "NO resources configured\n")
+
+
+    def test_remove_multiple_booth_configuration(self):
+        self.assert_pcs_success("resource show", "NO resources configured\n")
+        self.assert_pcs_success("booth create ip 192.168.122.120")
+        self.assert_pcs_success(
+            "resource create some-id ocf:pacemaker:booth-site"
+            " config=/etc/booth/booth.conf"
+        )
+        self.assert_pcs_success("resource show", [
+             " Resource Group: booth-booth-group",
+             "     booth-booth-ip	(ocf::heartbeat:IPaddr2):	Stopped",
+             "     booth-booth-service	(ocf::pacemaker:booth-site):	Stopped",
+             " some-id	(ocf::pacemaker:booth-site):	Stopped",
+        ])
+        self.assert_pcs_success("booth remove --force", [
+            "Warning: found more than one booth instance 'booth' in cib",
+            "Deleting Resource - booth-booth-ip",
+            "Deleting Resource (and group) - booth-booth-service",
+            "Deleting Resource - some-id",
+        ])
+
+
+class TicketGrantTest(BoothTest):
+    def test_failed_when_implicit_site_but_not_correct_configuration_in_cib(
+        self
+    ):
+        self.assert_pcs_success("booth ticket add T1")
+        #no resource in cib
+        self.assert_pcs_fail("booth ticket grant T1", [
+            "Error: cannot determine local site ip, please specify site"
+                " parameter"
+            ,
+        ])
+
+class TicketRevokeTest(BoothTest):
+    def test_failed_when_implicit_site_but_not_correct_configuration_in_cib(
+        self
+    ):
+        self.assert_pcs_success("booth ticket add T1")
+        #no resource in cib
+        self.assert_pcs_fail("booth ticket revoke T1", [
+            "Error: cannot determine local site ip, please specify site"
+                " parameter"
+            ,
+        ])
+
+class ConfigTest(unittest.TestCase, BoothMixin):
+    def setUp(self):
+        shutil.copy(EMPTY_CIB, TEMP_CIB)
+        self.pcs_runner = PcsRunner(TEMP_CIB)
+
+    def test_fail_when_config_file_do_not_exists(self):
+        ensure_booth_config_not_exists()
+        self.assert_pcs_fail(
+            "booth config",
+            "Error: Booth config file '{0}' does not exist\n".format(
+                BOOTH_CONFIG_FILE
+            )
+        )
+
+    def test_too_much_args(self):
+        self.assert_pcs_fail(
+            "booth config nodename surplus",
+            stdout_start="\nUsage: pcs booth <command>\n    config ["
+        )
+
+    def test_show_unsupported_values(self):
+        ensure_booth_config_not_exists()
+        self.assert_pcs_success(
+            "booth setup sites 1.1.1.1 2.2.2.2 arbitrators 3.3.3.3"
+        )
+        with open(BOOTH_CONFIG_FILE, "a") as config_file:
+            config_file.write("some = nonsense")
+        self.assert_pcs_success("booth ticket add TicketA")
+        with open(BOOTH_CONFIG_FILE, "a") as config_file:
+            config_file.write("another = nonsense")
+
+        self.assert_pcs_success(
+            "booth config",
+            stdout_full="\n".join((
+                "authfile = {0}".format(BOOTH_KEY_FILE),
+                "site = 1.1.1.1",
+                "site = 2.2.2.2",
+                "arbitrator = 3.3.3.3",
+                "some = nonsense",
+                'ticket = "TicketA"',
+                "another = nonsense",
+            ))
+        )
diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
index 2c3e71b..597e0e4 100644
--- a/pcs/test/test_cluster.py
+++ b/pcs/test/test_cluster.py
@@ -7,7 +7,7 @@ from __future__ import (
 
 import os
 import shutil
-import unittest
+from pcs.test.tools import pcs_unittest as unittest
 
 from pcs.test.tools.assertions import AssertPcsMixin
 from pcs.test.tools.misc import (
@@ -106,7 +106,7 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin):
         self.assertTrue(output.startswith("\nUsage: pcs cluster setup..."))
         self.assertEqual(1, returnVal)
 
-        output, returnVal = pcs(temp_cib, "cluster setup cname rh7-1 rh7-2")
+        output, returnVal = pcs(temp_cib, "cluster setup cname rh7-1.localhost rh7-2.localhost")
         self.assertEqual(
             "Error: A cluster name (--name <name>) is required to setup a cluster\n",
             output
@@ -116,22 +116,22 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin):
     def test_cluster_setup_hostnames_resolving(self):
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address"
+            "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address.invalid"
             .format(corosync_conf_tmp, cluster_conf_tmp)
         )
         ac(output, """\
 Error: Unable to resolve all hostnames, use --force to override
-Warning: Unable to resolve hostname: nonexistant-address
+Warning: Unable to resolve hostname: nonexistant-address.invalid
 """)
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address --force"
+            "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address.invalid --force"
             .format(corosync_conf_tmp, cluster_conf_tmp)
         )
         ac(output, """\
-Warning: Unable to resolve hostname: nonexistant-address
+Warning: Unable to resolve hostname: nonexistant-address.invalid
 """)
         self.assertEqual(0, returnVal)
 
@@ -141,7 +141,7 @@ Warning: Unable to resolve hostname: nonexistant-address
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost"
             .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
@@ -156,12 +156,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -183,7 +183,7 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf={0} --name cname rh7-2 rh7-3"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-2.localhost rh7-3.localhost"
             .format(corosync_conf_tmp)
         )
         self.assertEqual("""\
@@ -198,7 +198,7 @@ Error: {0} already exists, use --force to overwrite
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --corosync_conf={0} --name cname rh7-2 rh7-3"
+            "cluster setup --force --local --corosync_conf={0} --name cname rh7-2.localhost rh7-3.localhost"
             .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
@@ -215,12 +215,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-3
+        ring0_addr: rh7-3.localhost
         nodeid: 2
     }
 }
@@ -243,7 +243,7 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost"
             .format(cluster_conf_tmp)
         )
         self.assertEqual("", output)
@@ -252,17 +252,17 @@ logging {
 <cluster config_version="9" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -283,7 +283,7 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-2 rh7-3"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-2.localhost rh7-3.localhost"
             .format(cluster_conf_tmp)
         )
         self.assertEqual("""\
@@ -298,7 +298,7 @@ Error: {0} already exists, use --force to overwrite
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf={0} --name cname rh7-2 rh7-3"
+            "cluster setup --force --local --cluster_conf={0} --name cname rh7-2.localhost rh7-3.localhost"
             .format(cluster_conf_tmp)
         )
         self.assertEqual("", output)
@@ -309,17 +309,17 @@ Error: {0} already exists, use --force to overwrite
 <cluster config_version="9" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-2" nodeid="1">
+    <clusternode name="rh7-2.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-3" nodeid="2">
+    <clusternode name="rh7-3.localhost" nodeid="2">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-3"/>
+          <device name="pcmk-redirect" port="rh7-3.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -344,7 +344,7 @@ Error: {0} already exists, use --force to overwrite
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost"
             .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
@@ -361,12 +361,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -385,10 +385,10 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode add --corosync_conf={0} rh7-3"
+            "cluster localnode add --corosync_conf={0} rh7-3.localhost"
             .format(corosync_conf_tmp)
         )
-        self.assertEqual("rh7-3: successfully added!\n", output)
+        self.assertEqual("rh7-3.localhost: successfully added!\n", output)
         self.assertEqual(0, returnVal)
         with open(corosync_conf_tmp) as f:
             data = f.read()
@@ -402,17 +402,17 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 
     node {
-        ring0_addr: rh7-3
+        ring0_addr: rh7-3.localhost
         nodeid: 3
     }
 }
@@ -430,11 +430,11 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --corosync_conf={0} rh7-3"
+            "cluster localnode remove --corosync_conf={0} rh7-3.localhost"
             .format(corosync_conf_tmp)
         )
         self.assertEqual(0, returnVal)
-        self.assertEqual("rh7-3: successfully removed!\n", output)
+        self.assertEqual("rh7-3.localhost: successfully removed!\n", output)
         with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
@@ -447,12 +447,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -471,10 +471,10 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode add --corosync_conf={0} rh7-3,192.168.1.3"
+            "cluster localnode add --corosync_conf={0} rh7-3.localhost,192.168.1.3"
             .format(corosync_conf_tmp)
         )
-        self.assertEqual("rh7-3,192.168.1.3: successfully added!\n", output)
+        self.assertEqual("rh7-3.localhost,192.168.1.3: successfully added!\n", output)
         self.assertEqual(0, returnVal)
         with open(corosync_conf_tmp) as f:
             data = f.read()
@@ -488,17 +488,17 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 
     node {
-        ring0_addr: rh7-3
+        ring0_addr: rh7-3.localhost
         ring1_addr: 192.168.1.3
         nodeid: 3
     }
@@ -517,11 +517,11 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --corosync_conf={0} rh7-2"
+            "cluster localnode remove --corosync_conf={0} rh7-2.localhost"
             .format(corosync_conf_tmp)
         )
         self.assertEqual(0, returnVal)
-        self.assertEqual("rh7-2: successfully removed!\n", output)
+        self.assertEqual("rh7-2.localhost: successfully removed!\n", output)
         with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
@@ -534,12 +534,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-3
+        ring0_addr: rh7-3.localhost
         ring1_addr: 192.168.1.3
         nodeid: 3
     }
@@ -559,11 +559,11 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --corosync_conf={0} rh7-3,192.168.1.3"
+            "cluster localnode remove --corosync_conf={0} rh7-3.localhost,192.168.1.3"
             .format(corosync_conf_tmp)
         )
         self.assertEqual(0, returnVal)
-        self.assertEqual("rh7-3,192.168.1.3: successfully removed!\n", output)
+        self.assertEqual("rh7-3.localhost,192.168.1.3: successfully removed!\n", output)
         with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
@@ -576,7 +576,7 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 }
@@ -601,7 +601,7 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --auto_tie_breaker=1"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --auto_tie_breaker=1"
             .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
@@ -618,12 +618,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -642,10 +642,10 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode add --corosync_conf={0} rh7-3"
+            "cluster localnode add --corosync_conf={0} rh7-3.localhost"
             .format(corosync_conf_tmp)
         )
-        self.assertEqual(output, "rh7-3: successfully added!\n")
+        self.assertEqual(output, "rh7-3.localhost: successfully added!\n")
         self.assertEqual(0, returnVal)
         with open(corosync_conf_tmp) as f:
             data = f.read()
@@ -659,17 +659,17 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 
     node {
-        ring0_addr: rh7-3
+        ring0_addr: rh7-3.localhost
         nodeid: 3
     }
 }
@@ -688,10 +688,10 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --corosync_conf={0} rh7-3"
+            "cluster localnode remove --corosync_conf={0} rh7-3.localhost"
             .format(corosync_conf_tmp)
         )
-        self.assertEqual("rh7-3: successfully removed!\n", output)
+        self.assertEqual("rh7-3.localhost: successfully removed!\n", output)
         self.assertEqual(0, returnVal)
         with open(corosync_conf_tmp) as f:
             data = f.read()
@@ -705,12 +705,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -734,7 +734,7 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 rh7-3"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost rh7-3.localhost"
             .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
@@ -751,17 +751,17 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 
     node {
-        ring0_addr: rh7-3
+        ring0_addr: rh7-3.localhost
         nodeid: 3
     }
 }
@@ -784,7 +784,7 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --transport udp"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --transport udp"
             .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
@@ -801,12 +801,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -834,7 +834,7 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost"
             .format(cluster_conf_tmp)
         )
         ac(output, "")
@@ -845,17 +845,17 @@ logging {
 <cluster config_version="9" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -873,10 +873,10 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode add --cluster_conf={0} rh7-3"
+            "cluster localnode add --cluster_conf={0} rh7-3.localhost"
             .format(cluster_conf_tmp)
         )
-        ac(output, "rh7-3: successfully added!\n")
+        ac(output, "rh7-3.localhost: successfully added!\n")
         self.assertEqual(returnVal, 0)
         with open(cluster_conf_tmp) as f:
             data = f.read()
@@ -884,24 +884,24 @@ logging {
 <cluster config_version="13" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-3" nodeid="3">
+    <clusternode name="rh7-3.localhost" nodeid="3">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-3"/>
+          <device name="pcmk-redirect" port="rh7-3.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -919,10 +919,10 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --cluster_conf={0} rh7-3"
+            "cluster localnode remove --cluster_conf={0} rh7-3.localhost"
             .format(cluster_conf_tmp)
         )
-        ac(output, "rh7-3: successfully removed!\n")
+        ac(output, "rh7-3.localhost: successfully removed!\n")
         self.assertEqual(returnVal, 0)
 
         with open(cluster_conf_tmp) as f:
@@ -931,17 +931,17 @@ logging {
 <cluster config_version="15" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -959,10 +959,10 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode add --cluster_conf={0} rh7-3,192.168.1.3"
+            "cluster localnode add --cluster_conf={0} rh7-3.localhost,192.168.1.3"
             .format(cluster_conf_tmp)
         )
-        ac(output, "rh7-3,192.168.1.3: successfully added!\n")
+        ac(output, "rh7-3.localhost,192.168.1.3: successfully added!\n")
         self.assertEqual(returnVal, 0)
 
         with open(cluster_conf_tmp) as f:
@@ -971,25 +971,25 @@ logging {
 <cluster config_version="20" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-3" nodeid="3">
+    <clusternode name="rh7-3.localhost" nodeid="3">
       <altname name="192.168.1.3"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-3"/>
+          <device name="pcmk-redirect" port="rh7-3.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -1007,10 +1007,10 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --cluster_conf={0} rh7-2"
+            "cluster localnode remove --cluster_conf={0} rh7-2.localhost"
             .format(cluster_conf_tmp)
         )
-        ac(output, "rh7-2: successfully removed!\n")
+        ac(output, "rh7-2.localhost: successfully removed!\n")
         self.assertEqual(returnVal, 0)
 
         with open(cluster_conf_tmp) as f:
@@ -1019,18 +1019,18 @@ logging {
 <cluster config_version="22" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-3" nodeid="3">
+    <clusternode name="rh7-3.localhost" nodeid="3">
       <altname name="192.168.1.3"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-3"/>
+          <device name="pcmk-redirect" port="rh7-3.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -1048,10 +1048,10 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --cluster_conf={0} rh7-3,192.168.1.3"
+            "cluster localnode remove --cluster_conf={0} rh7-3.localhost,192.168.1.3"
             .format(cluster_conf_tmp)
         )
-        ac(output, "rh7-3,192.168.1.3: successfully removed!\n")
+        ac(output, "rh7-3.localhost,192.168.1.3: successfully removed!\n")
         self.assertEqual(returnVal, 0)
 
         with open(cluster_conf_tmp) as f:
@@ -1060,10 +1060,10 @@ logging {
 <cluster config_version="23" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -1086,7 +1086,7 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 rh7-3"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost rh7-3.localhost"
             .format(cluster_conf_tmp)
         )
         ac(output, "")
@@ -1097,24 +1097,24 @@ logging {
 <cluster config_version="12" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-3" nodeid="3">
+    <clusternode name="rh7-3.localhost" nodeid="3">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-3"/>
+          <device name="pcmk-redirect" port="rh7-3.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -1137,7 +1137,7 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --transport udpu"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --transport udpu"
             .format(cluster_conf_tmp)
         )
         ac(output, """\
@@ -1150,17 +1150,17 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 <cluster config_version="9" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -1182,7 +1182,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --ipv6"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --ipv6"
             .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
@@ -1200,12 +1200,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -1228,7 +1228,7 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --ipv6"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --ipv6"
             .format(cluster_conf_tmp)
         )
         ac(output, """\
@@ -1241,17 +1241,17 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
 <cluster config_version="9" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -1272,14 +1272,14 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
             return
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr0 1.1.2.0"
             .format(corosync_conf_tmp)
         )
         assert r == 1
         ac(o, "Error: --addr0 can only be used once\n")
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
             .format(corosync_conf_tmp)
         )
         assert r == 1
@@ -1289,7 +1289,7 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
         )
 
         o,r = pcs(
-            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0"
+            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0"
             .format(corosync_conf_tmp)
         )
         ac(o,"")
@@ -1321,12 +1321,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -1348,7 +1348,7 @@ logging {
             return
 
         o,r = pcs(
-            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
+            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
             .format(corosync_conf_tmp)
         )
         ac(o,"")
@@ -1380,12 +1380,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -1407,7 +1407,7 @@ logging {
             return
 
         o,r = pcs(
-            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
+            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
             .format(corosync_conf_tmp)
         )
         ac(o,"")
@@ -1439,12 +1439,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -1466,7 +1466,7 @@ logging {
             return
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
             .format(corosync_conf_tmp)
         )
         ac(o,"")
@@ -1500,12 +1500,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -1527,14 +1527,14 @@ logging {
             return
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
             .format(corosync_conf_tmp)
         )
         ac(o, "Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n")
         assert r == 1
 
         o,r = pcs(
-            "cluster setup --force --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+            "cluster setup --force --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
             .format(corosync_conf_tmp)
         )
         ac(o, "Warning: using a RRP mode of 'active' is not supported or tested\n")
@@ -1566,12 +1566,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -1593,14 +1593,14 @@ logging {
             return
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
             .format(corosync_conf_tmp)
         )
         ac(o, "Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n")
         assert r == 1
 
         o,r = pcs(
-            "cluster setup --force --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
+            "cluster setup --force --local --corosync_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
             .format(corosync_conf_tmp)
         )
         ac(o, "Warning: using a RRP mode of 'active' is not supported or tested\n")
@@ -1631,12 +1631,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -1658,25 +1658,25 @@ logging {
             return
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2,192.168.99.3"
             .format(corosync_conf_tmp)
         )
-        ac(o,"Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2,192.168.99.3\n")
+        ac(o,"Error: You cannot specify more than two addresses for a node: rh7-2.localhost,192.168.99.2,192.168.99.3\n")
         assert r == 1
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost"
             .format(corosync_conf_tmp)
         )
         ac(o,"Error: if one node is configured for RRP, all nodes must be configured for RRP\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --force --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1")
+        o,r = pcs("cluster setup --force --local --name test99 rh7-1.localhost rh7-2.localhost --addr0 1.1.1.1")
         ac(o,"Error: --addr0 and --addr1 can only be used with --transport=udp\n")
         assert r == 1
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2"
             .format(corosync_conf_tmp)
         )
         ac(o,"")
@@ -1694,13 +1694,13 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         ring1_addr: 192.168.99.1
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         ring1_addr: 192.168.99.2
         nodeid: 2
     }
@@ -1723,49 +1723,49 @@ logging {
             return
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2"
+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=2"
             .format(corosync_conf_tmp)
         )
         ac(o, "Error: '2' is not a valid --wait_for_all value, use 0, 1\n")
         assert r == 1
 
         o,r = pcs(
-            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2"
+            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=2"
             .format(corosync_conf_tmp)
         )
         ac(o, "Error: '2' is not a valid --wait_for_all value, use 0, 1\n")
         assert r == 1
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --auto_tie_breaker=2"
+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --auto_tie_breaker=2"
             .format(corosync_conf_tmp)
         )
         ac(o, "Error: '2' is not a valid --auto_tie_breaker value, use 0, 1\n")
         assert r == 1
 
         o,r = pcs(
-            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --auto_tie_breaker=2"
+            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --auto_tie_breaker=2"
             .format(corosync_conf_tmp)
         )
         ac(o, "Error: '2' is not a valid --auto_tie_breaker value, use 0, 1\n")
         assert r == 1
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --last_man_standing=2"
+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --last_man_standing=2"
             .format(corosync_conf_tmp)
         )
         ac(o, "Error: '2' is not a valid --last_man_standing value, use 0, 1\n")
         assert r == 1
 
         o,r = pcs(
-            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --last_man_standing=2"
+            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --last_man_standing=2"
             .format(corosync_conf_tmp)
         )
         ac(o, "Error: '2' is not a valid --last_man_standing value, use 0, 1\n")
         assert r == 1
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=1 --auto_tie_breaker=1 --last_man_standing=1 --last_man_standing_window=12000"
+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=1 --auto_tie_breaker=1 --last_man_standing=1 --last_man_standing_window=12000"
             .format(corosync_conf_tmp)
         )
         ac(o,"")
@@ -1782,12 +1782,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -1813,14 +1813,14 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0"
+            "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr0 1.1.2.0"
         )
         ac(output, "Error: --addr0 can only be used once\n")
         self.assertEqual(returnVal, 1)
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
             .format(cluster_conf_tmp)
         )
         ac(output, """\
@@ -1831,7 +1831,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0"
+            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0"
             .format(cluster_conf_tmp)
         )
         ac(output, "")
@@ -1843,19 +1843,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 <cluster config_version="14" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -1881,7 +1881,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
+            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
             .format(cluster_conf_tmp)
         )
         ac(output, "")
@@ -1893,19 +1893,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 <cluster config_version="14" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -1931,7 +1931,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
+            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
             .format(cluster_conf_tmp)
         )
         ac(output, "")
@@ -1943,19 +1943,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 <cluster config_version="14" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -1981,7 +1981,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
             .format(cluster_conf_tmp)
         )
         ac(output, "")
@@ -1993,19 +1993,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 <cluster config_version="14" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -2031,7 +2031,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
             .format(cluster_conf_tmp)
         )
         ac(
@@ -2042,7 +2042,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+            "cluster setup --force --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
             .format(cluster_conf_tmp)
         )
         ac(
@@ -2056,19 +2056,19 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 <cluster config_version="14" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -2094,7 +2094,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
             .format(cluster_conf_tmp)
         )
         ac(output, """\
@@ -2105,7 +2105,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
+            "cluster setup --force --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
             .format(cluster_conf_tmp)
         )
         ac(output, """\
@@ -2119,19 +2119,19 @@ Warning: using a RRP mode of 'active' is not supported or tested
 <cluster config_version="12" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -2154,17 +2154,17 @@ Warning: using a RRP mode of 'active' is not supported or tested
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2,192.168.99.3"
             .format(cluster_conf_tmp)
         )
         ac(output, """\
-Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2,192.168.99.3
+Error: You cannot specify more than two addresses for a node: rh7-2.localhost,192.168.99.2,192.168.99.3
 """)
         self.assertEqual(returnVal, 1)
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --name cname rh7-1,192.168.99.1 rh7-2"
+            "cluster setup --local --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost"
         )
         ac(output, """\
 Error: if one node is configured for RRP, all nodes must be configured for RRP
@@ -2173,7 +2173,7 @@ Error: if one node is configured for RRP, all nodes must be configured for RRP
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1 --transport=udpu"
+            "cluster setup --local --name test99 rh7-1.localhost rh7-2.localhost --addr0 1.1.1.1 --transport=udpu"
         )
         ac(output, """\
 Error: --addr0 and --addr1 can only be used with --transport=udp
@@ -2183,7 +2183,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost,192.168.99.1 rh7-2.localhost,192.168.99.2"
             .format(cluster_conf_tmp)
         )
         ac(output, "")
@@ -2194,19 +2194,19 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 <cluster config_version="12" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <altname name="192.168.99.1"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <altname name="192.168.99.2"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -2231,19 +2231,19 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 <cluster config_version="12" name="cname">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <altname name="1.1.2.0"/>
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -2262,7 +2262,7 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode passive --broadcast0 --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode passive --broadcast0 --transport udp"
             .format(cluster_conf_tmp)
         )
         ac(output, """\
@@ -2277,7 +2277,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --broadcast0 --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1.localhost rh7-2.localhost --addr0 1.1.1.0 --addr1 1.1.2.0 --broadcast0 --transport udp"
             .format(cluster_conf_tmp)
         )
         ac(output, """\
@@ -2294,7 +2294,7 @@ Warning: Enabling broadcast for all rings as CMAN does not support broadcast in
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2 --auto_tie_breaker=3 --last_man_standing=4 --last_man_standing_window=5"
+            "cluster setup --local --cluster_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --wait_for_all=2 --auto_tie_breaker=3 --last_man_standing=4 --last_man_standing_window=5"
             .format(cluster_conf_tmp)
         )
         ac(output, """\
@@ -2310,17 +2310,17 @@ Warning: --last_man_standing_window ignored as it is not supported on CMAN clust
 <cluster config_version="9" name="test99">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -2341,7 +2341,7 @@ Warning: --last_man_standing_window ignored as it is not supported on CMAN clust
             return
 
         o,r = pcs(
-            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005"
+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005"
             .format(corosync_conf_tmp)
         )
         ac(o,"")
@@ -2364,12 +2364,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -2392,7 +2392,7 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf={0} --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005"
+            "cluster setup --local --cluster_conf={0} --name test99 rh7-1.localhost rh7-2.localhost --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005"
             .format(cluster_conf_tmp)
         )
         ac(output, """\
@@ -2405,17 +2405,17 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
 <cluster config_version="10" name="test99">
   <fence_daemon/>
   <clusternodes>
-    <clusternode name="rh7-1" nodeid="1">
+    <clusternode name="rh7-1.localhost" nodeid="1">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-1"/>
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
         </method>
       </fence>
     </clusternode>
-    <clusternode name="rh7-2" nodeid="2">
+    <clusternode name="rh7-2.localhost" nodeid="2">
       <fence>
         <method name="pcmk-method">
-          <device name="pcmk-redirect" port="rh7-2"/>
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
         </method>
       </fence>
     </clusternode>
@@ -2583,12 +2583,12 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
             return
 
         self.assert_pcs_fail(
-            "cluster setup --local --name cname rh7-1 rh7-2 --transport=unknown",
+            "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --transport=unknown",
             "Error: 'unknown' is not a valid transport value, use udp, udpu, use --force to override\n"
         )
 
         self.assert_pcs_success(
-            "cluster setup --local --name cname rh7-1 rh7-2 --transport=unknown --force",
+            "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --transport=unknown --force",
             "Warning: 'unknown' is not a valid transport value, use udp, udpu\n"
         )
         with open(corosync_conf_tmp) as f:
@@ -2603,12 +2603,12 @@ totem {
 
 nodelist {
     node {
-        ring0_addr: rh7-1
+        ring0_addr: rh7-1.localhost
         nodeid: 1
     }
 
     node {
-        ring0_addr: rh7-2
+        ring0_addr: rh7-2.localhost
         nodeid: 2
     }
 }
@@ -2630,12 +2630,12 @@ logging {
             return
 
         self.assert_pcs_fail(
-            "cluster setup --local --name cname rh7-1 rh7-2 --transport=rdma",
+            "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --transport=rdma",
             "Error: 'rdma' is not a valid transport value, use udp, udpu, use --force to override\n"
         )
 
         self.assert_pcs_success(
-            "cluster setup --local --name cname rh7-1 rh7-2 --transport=rdma --force",
+            "cluster setup --local --name cname rh7-1.localhost rh7-2.localhost --transport=rdma --force",
             "Warning: 'rdma' is not a valid transport value, use udp, udpu\n"
         )
         with open(cluster_conf_tmp) as f:
@@ -2644,6 +2644,115 @@ logging {
 <cluster config_version="9" name="cname">
   <fence_daemon/>
   <clusternodes>
+    <clusternode name="rh7-1.localhost" nodeid="1">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-1.localhost"/>
+        </method>
+      </fence>
+    </clusternode>
+    <clusternode name="rh7-2.localhost" nodeid="2">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-2.localhost"/>
+        </method>
+      </fence>
+    </clusternode>
+  </clusternodes>
+  <cman broadcast="no" expected_votes="1" transport="rdma" two_node="1"/>
+  <fencedevices>
+    <fencedevice agent="fence_pcmk" name="pcmk-redirect"/>
+  </fencedevices>
+  <rm>
+    <failoverdomains/>
+    <resources/>
+  </rm>
+</cluster>
+""")
+
+    def test_node_add_rhel6_unexpected_fence_pcmk_name(self):
+        if not utils.is_rhel6():
+            return
+
+        # change the fence device name to a value different from the one set by pcs
+        with open(cluster_conf_file, "r") as f:
+            data = f.read()
+        data = data.replace('name="pcmk-redirect"', 'name="pcmk_redirect"')
+        with open(cluster_conf_tmp, "w") as f:
+            f.write(data)
+
+        # test a node is added correctly and uses the fence device
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster localnode add --cluster_conf={0} rh7-3.localhost"
+            .format(cluster_conf_tmp)
+        )
+        ac(output, "rh7-3.localhost: successfully added!\n")
+        self.assertEqual(returnVal, 0)
+        with open(cluster_conf_tmp) as f:
+            data = f.read()
+            ac(data, """\
+<cluster config_version="13" name="test99">
+  <fence_daemon/>
+  <clusternodes>
+    <clusternode name="rh7-1" nodeid="1">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk_redirect" port="rh7-1"/>
+        </method>
+      </fence>
+    </clusternode>
+    <clusternode name="rh7-2" nodeid="2">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk_redirect" port="rh7-2"/>
+        </method>
+      </fence>
+    </clusternode>
+    <clusternode name="rh7-3.localhost" nodeid="3">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk_redirect" port="rh7-3.localhost"/>
+        </method>
+      </fence>
+    </clusternode>
+  </clusternodes>
+  <cman broadcast="no" transport="udpu"/>
+  <fencedevices>
+    <fencedevice agent="fence_pcmk" name="pcmk_redirect"/>
+  </fencedevices>
+  <rm>
+    <failoverdomains/>
+    <resources/>
+  </rm>
+</cluster>
+""")
+
+    def test_node_add_rhel6_missing_fence_pcmk(self):
+        if not utils.is_rhel6():
+            return
+
+        # change the fence agent to a value different from the one set by pcs
+        with open(cluster_conf_file, "r") as f:
+            data = f.read()
+        data = data.replace('agent="fence_pcmk"', 'agent="fence_whatever"')
+        with open(cluster_conf_tmp, "w") as f:
+            f.write(data)
+
+        # test a node is added correctly and uses the fence device
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster localnode add --cluster_conf={0} rh7-3.localhost"
+            .format(cluster_conf_tmp)
+        )
+        ac(output, "rh7-3.localhost: successfully added!\n")
+        self.assertEqual(returnVal, 0)
+        with open(cluster_conf_tmp) as f:
+            data = f.read()
+            ac(data, """\
+<cluster config_version="14" name="test99">
+  <fence_daemon/>
+  <clusternodes>
     <clusternode name="rh7-1" nodeid="1">
       <fence>
         <method name="pcmk-method">
@@ -2658,10 +2767,18 @@ logging {
         </method>
       </fence>
     </clusternode>
+    <clusternode name="rh7-3.localhost" nodeid="3">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect-1" port="rh7-3.localhost"/>
+        </method>
+      </fence>
+    </clusternode>
   </clusternodes>
-  <cman broadcast="no" expected_votes="1" transport="rdma" two_node="1"/>
+  <cman broadcast="no" transport="udpu"/>
   <fencedevices>
-    <fencedevice agent="fence_pcmk" name="pcmk-redirect"/>
+    <fencedevice agent="fence_whatever" name="pcmk-redirect"/>
+    <fencedevice agent="fence_pcmk" name="pcmk-redirect-1"/>
   </fencedevices>
   <rm>
     <failoverdomains/>
@@ -2669,3 +2786,4 @@ logging {
   </rm>
 </cluster>
 """)
+
diff --git a/pcs/test/test_common_tools.py b/pcs/test/test_common_tools.py
index 5c8482e..d9b6af3 100644
--- a/pcs/test/test_common_tools.py
+++ b/pcs/test/test_common_tools.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 import time
 
 from pcs.common import tools
@@ -63,3 +63,35 @@ class RunParallelTestCase(TestCase):
         elapsed_time = finish_time - start_time
         self.assertTrue(elapsed_time > x)
         self.assertTrue(elapsed_time < sum([i + 1 for i in range(x)]))
+
+
+class JoinMultilinesTest(TestCase):
+    def test_empty_input(self):
+        self.assertEqual(
+            "",
+            tools.join_multilines([])
+        )
+
+    def test_two_strings(self):
+        self.assertEqual(
+            "a\nb",
+            tools.join_multilines(["a", "b"])
+        )
+
+    def test_strip(self):
+        self.assertEqual(
+            "a\nb",
+            tools.join_multilines(["  a\n", "  b\n"])
+        )
+
+    def test_skip_empty(self):
+        self.assertEqual(
+            "a\nb",
+            tools.join_multilines(["  a\n", "   \n", "  b\n"])
+        )
+
+    def test_multiline(self):
+        self.assertEqual(
+            "a\nA\nb\nB",
+            tools.join_multilines(["a\nA\n", "b\nB\n"])
+        )
diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
index 364b40d..fee7093 100644
--- a/pcs/test/test_constraints.py
+++ b/pcs/test/test_constraints.py
@@ -7,7 +7,7 @@ from __future__ import (
 
 import os
 import shutil
-import unittest
+from pcs.test.tools import pcs_unittest as unittest
 
 from pcs.test.tools.assertions import AssertPcsMixin, console_report
 from pcs.test.tools.misc import (
@@ -2686,6 +2686,50 @@ class TicketAdd(ConstraintBaseTest):
             "  Master A loss-policy=fence ticket=T",
         ])
 
+class TicketRemoveTest(ConstraintBaseTest):
+    def test_remove_multiple_tickets(self):
+        #fixture
+        self.assert_pcs_success('constraint ticket add T A')
+        self.assert_pcs_success(
+            'constraint ticket add T A --force',
+            stdout_full=[
+                "Warning: duplicate constraint already exists",
+                "  A ticket=T (id:ticket-T-A)"
+            ]
+        )
+        self.assert_pcs_success(
+            'constraint ticket set A B setoptions ticket=T'
+        )
+        self.assert_pcs_success(
+            'constraint ticket set A setoptions ticket=T'
+        )
+        self.assert_pcs_success("constraint ticket show", stdout_full=[
+            "Ticket Constraints:",
+            "  A ticket=T",
+            "  A ticket=T",
+            "  Resource Sets:",
+            "    set A B setoptions ticket=T",
+            "    set A setoptions ticket=T",
+        ])
+
+        #test
+        self.assert_pcs_success("constraint ticket remove T A")
+
+        self.assert_pcs_success("constraint ticket show", stdout_full=[
+            "Ticket Constraints:",
+            "  Resource Sets:",
+            "    set B setoptions ticket=T",
+        ])
+
+    def test_fail_when_no_matching_ticket_constraint_here(self):
+        self.assert_pcs_success("constraint ticket show", stdout_full=[
+            "Ticket Constraints:",
+        ])
+        self.assert_pcs_fail("constraint ticket remove T A", [
+            "Error: no matching ticket constraint found"
+        ])
+
+
 class TicketShow(ConstraintBaseTest):
     def test_show_set(self):
         self.assert_pcs_success('constraint ticket set A B setoptions ticket=T')
diff --git a/pcs/test/test_lib_cib_acl.py b/pcs/test/test_lib_cib_acl.py
index 7e1750e..efaad7e 100644
--- a/pcs/test/test_lib_cib_acl.py
+++ b/pcs/test/test_lib_cib_acl.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from pcs.test.tools.assertions import (
     assert_raise_library_error,
diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py
index 1149a3f..ec9c312 100644
--- a/pcs/test/test_lib_cib_tools.py
+++ b/pcs/test/test_lib_cib_tools.py
@@ -5,8 +5,9 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
+from os.path import join
 from lxml import etree
 
 from pcs.test.tools.assertions import (
@@ -14,9 +15,10 @@ from pcs.test.tools.assertions import (
     assert_xml_equal,
 )
 from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
 
+from pcs import settings
 from pcs.common import report_codes
 from pcs.lib.external import CommandRunner
 from pcs.lib.errors import ReportItemSeverity as severities
@@ -48,6 +50,30 @@ class DoesIdExistTest(CibToolsTest):
         self.assertFalse(lib.does_id_exist(self.cib.tree, "myId "))
         self.assertFalse(lib.does_id_exist(self.cib.tree, "my Id"))
 
+    def test_ignore_status_section(self):
+        self.cib.append_to_first_tag_name(
+            "status",
+            """\
+<elem1 id="status-1">
+    <elem1a id="status-1a">
+        <elem1aa id="status-1aa"/>
+        <elem1ab id="status-1ab"/>
+    </elem1a>
+    <elem1b id="status-1b">
+        <elem1ba id="status-1ba"/>
+        <elem1bb id="status-1bb"/>
+    </elem1b>
+</elem1>
+"""
+        )
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1"))
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1a"))
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1aa"))
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1ab"))
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1b"))
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1ba"))
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "status-1bb"))
+
 class FindUniqueIdTest(CibToolsTest):
     def test_already_unique(self):
         self.fixture_add_primitive_with_id("myId")
@@ -112,6 +138,27 @@ class GetConstraintsTest(CibToolsTest):
             ),
         )
 
+class GetResourcesTest(CibToolsTest):
+    def test_success_if_exists(self):
+        self.assertEqual(
+            "resources",
+            lib.get_resources(self.cib.tree).tag
+        )
+
+    def test_raise_if_missing(self):
+        for section in self.cib.tree.findall(".//configuration/resources"):
+            section.getparent().remove(section)
+        assert_raise_library_error(
+            lambda: lib.get_resources(self.cib.tree),
+            (
+                severities.ERROR,
+                report_codes.CIB_CANNOT_FIND_MANDATORY_SECTION,
+                {
+                    "section": "configuration/resources",
+                }
+            ),
+        )
+
 
 class GetAclsTest(CibToolsTest):
     def setUp(self):
@@ -324,3 +371,94 @@ class EnsureCibVersionTest(TestCase):
             )
         )
         mock_upgrade_cib.assert_called_once_with(self.cib, self.mock_runner)
+
+
+ at mock.patch("tempfile.NamedTemporaryFile")
+class UpgradeCibTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+
+    def test_success(self, mock_named_file):
+        mock_file = mock.MagicMock()
+        mock_file.name = "mock_file_name"
+        mock_file.read.return_value = "<cib/>"
+        mock_named_file.return_value = mock_file
+        self.mock_runner.run.return_value = ("", "", 0)
+        assert_xml_equal(
+            "<cib/>",
+            etree.tostring(
+                lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner)
+            ).decode()
+        )
+        mock_named_file.assert_called_once_with("w+", suffix=".pcs")
+        mock_file.write.assert_called_once_with("<old_cib/>")
+        mock_file.flush.assert_called_once_with()
+        self.mock_runner.run.assert_called_once_with(
+            [
+                join(settings.pacemaker_binaries, "cibadmin"),
+                "--upgrade",
+                "--force"
+            ],
+            env_extend={"CIB_file": "mock_file_name"}
+        )
+        mock_file.seek.assert_called_once_with(0)
+        mock_file.read.assert_called_once_with()
+
+    def test_upgrade_failed(self, mock_named_file):
+        mock_file = mock.MagicMock()
+        mock_file.name = "mock_file_name"
+        mock_named_file.return_value = mock_file
+        self.mock_runner.run.return_value = ("some info", "some error", 1)
+        assert_raise_library_error(
+            lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner),
+            (
+                severities.ERROR,
+                report_codes.CIB_UPGRADE_FAILED,
+                {
+                    "reason": "some error\nsome info",
+                }
+            )
+        )
+        mock_named_file.assert_called_once_with("w+", suffix=".pcs")
+        mock_file.write.assert_called_once_with("<old_cib/>")
+        mock_file.flush.assert_called_once_with()
+        self.mock_runner.run.assert_called_once_with(
+            [
+                join(settings.pacemaker_binaries, "cibadmin"),
+                "--upgrade",
+                "--force"
+            ],
+            env_extend={"CIB_file": "mock_file_name"}
+        )
+
+    def test_unable_to_parse_upgraded_cib(self, mock_named_file):
+        mock_file = mock.MagicMock()
+        mock_file.name = "mock_file_name"
+        mock_file.read.return_value = "not xml"
+        mock_named_file.return_value = mock_file
+        self.mock_runner.run.return_value = ("", "", 0)
+        assert_raise_library_error(
+            lambda: lib.upgrade_cib(etree.XML("<old_cib/>"), self.mock_runner),
+            (
+                severities.ERROR,
+                report_codes.CIB_UPGRADE_FAILED,
+                {
+                    "reason":
+                        "Start tag expected, '<' not found, line 1, column 1",
+                }
+            )
+        )
+        mock_named_file.assert_called_once_with("w+", suffix=".pcs")
+        mock_file.write.assert_called_once_with("<old_cib/>")
+        mock_file.flush.assert_called_once_with()
+        self.mock_runner.run.assert_called_once_with(
+            [
+                join(settings.pacemaker_binaries, "cibadmin"),
+                "--upgrade",
+                "--force"
+            ],
+            env_extend={"CIB_file": "mock_file_name"}
+        )
+        mock_file.seek.assert_called_once_with(0)
+        mock_file.read.assert_called_once_with()
+
diff --git a/pcs/test/test_lib_commands_qdevice.py b/pcs/test/test_lib_commands_qdevice.py
index ff588d5..756afa8 100644
--- a/pcs/test/test_lib_commands_qdevice.py
+++ b/pcs/test/test_lib_commands_qdevice.py
@@ -5,11 +5,11 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 import base64
 import logging
 
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.assertions import (
     assert_raise_library_error,
     assert_report_item_list_equal,
@@ -345,6 +345,7 @@ class QdeviceNetSetupTest(QdeviceTestCase):
         )
 
 
+ at mock.patch("pcs.lib.corosync.qdevice_net.qdevice_status_cluster_text")
 @mock.patch("pcs.lib.external.stop_service")
 @mock.patch("pcs.lib.external.disable_service")
 @mock.patch("pcs.lib.commands.qdevice.qdevice_net.qdevice_destroy")
@@ -355,7 +356,11 @@ class QdeviceNetSetupTest(QdeviceTestCase):
     lambda self: "mock_runner"
 )
 class QdeviceNetDestroyTest(QdeviceTestCase):
-    def test_success(self, mock_net_destroy, mock_net_disable, mock_net_stop):
+    def test_success_not_used(
+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
+    ):
+        mock_status.return_value = ""
+
         lib.qdevice_destroy(self.lib_env, "net")
 
         mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
@@ -398,9 +403,85 @@ class QdeviceNetDestroyTest(QdeviceTestCase):
             ]
         )
 
+    def test_success_used_forced(
+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
+    ):
+        mock_status.return_value = 'Cluster "a_cluster":\n'
+
+        lib.qdevice_destroy(self.lib_env, "net", proceed_if_used=True)
+
+        mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
+        mock_net_disable.assert_called_once_with(
+            "mock_runner",
+            "corosync-qnetd"
+        )
+        mock_net_destroy.assert_called_once_with()
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.WARNING,
+                    report_codes.QDEVICE_USED_BY_CLUSTERS,
+                    {
+                        "clusters": ["a_cluster"],
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_DISABLE_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.QDEVICE_DESTROY_SUCCESS,
+                    {
+                        "model": "net",
+                    }
+                )
+            ]
+        )
+
+    def test_used_not_forced(
+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
+    ):
+        mock_status.return_value = 'Cluster "a_cluster":\n'
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_destroy(self.lib_env, "net"),
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_USED_BY_CLUSTERS,
+                {
+                    "clusters": ["a_cluster"],
+                },
+                report_codes.FORCE_QDEVICE_USED
+            ),
+        )
+
+        mock_net_stop.assert_not_called()
+        mock_net_disable.assert_not_called()
+        mock_net_destroy.assert_not_called()
+
     def test_stop_failed(
-        self, mock_net_destroy, mock_net_disable, mock_net_stop
+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
     ):
+        mock_status.return_value = ""
         mock_net_stop.side_effect = StopServiceError(
             "test service",
             "test error"
@@ -435,8 +516,9 @@ class QdeviceNetDestroyTest(QdeviceTestCase):
         )
 
     def test_disable_failed(
-        self, mock_net_destroy, mock_net_disable, mock_net_stop
+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
     ):
+        mock_status.return_value = ""
         mock_net_disable.side_effect = DisableServiceError(
             "test service",
             "test error"
@@ -481,8 +563,9 @@ class QdeviceNetDestroyTest(QdeviceTestCase):
         )
 
     def test_destroy_failed(
-        self, mock_net_destroy, mock_net_disable, mock_net_stop
+        self, mock_net_destroy, mock_net_disable, mock_net_stop, mock_status
     ):
+        mock_status.return_value = ""
         mock_net_destroy.side_effect = LibraryError("mock_report_item")
 
         self.assertRaises(
@@ -755,6 +838,7 @@ class QdeviceNetStartTest(QdeviceTestCase):
         )
 
 
+@mock.patch("pcs.lib.corosync.qdevice_net.qdevice_status_cluster_text")
 @mock.patch("pcs.lib.external.stop_service")
 @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
 @mock.patch.object(
@@ -763,13 +847,49 @@ class QdeviceNetStartTest(QdeviceTestCase):
     lambda self: "mock_runner"
 )
 class QdeviceNetStopTest(QdeviceTestCase):
-    def test_success(self, mock_net_stop):
-        lib.qdevice_stop(self.lib_env, "net")
+    def test_success_not_used(self, mock_net_stop, mock_status):
+        mock_status.return_value = ""
+
+        lib.qdevice_stop(self.lib_env, "net", proceed_if_used=False)
+
+        mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+    def test_success_used_forced(self, mock_net_stop, mock_status):
+        mock_status.return_value = 'Cluster "a_cluster":\n'
+
+        lib.qdevice_stop(self.lib_env, "net", proceed_if_used=True)
+
         mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
         assert_report_item_list_equal(
             self.mock_reporter.report_item_list,
             [
                 (
+                    severity.WARNING,
+                    report_codes.QDEVICE_USED_BY_CLUSTERS,
+                    {
+                        "clusters": ["a_cluster"],
+                    }
+                ),
+                (
                     severity.INFO,
                     report_codes.SERVICE_STOP_STARTED,
                     {
@@ -786,7 +906,28 @@ class QdeviceNetStopTest(QdeviceTestCase):
             ]
         )
 
-    def test_failed(self, mock_net_stop):
+    def test_used_not_forced(self, mock_net_stop, mock_status):
+        mock_status.return_value = 'Cluster "a_cluster":\n'
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_stop(
+                self.lib_env,
+                "net",
+                proceed_if_used=False
+            ),
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_USED_BY_CLUSTERS,
+                {
+                    "clusters": ["a_cluster"],
+                },
+                report_codes.FORCE_QDEVICE_USED
+            ),
+        )
+        mock_net_stop.assert_not_called()
+
+    def test_failed(self, mock_net_stop, mock_status):
+        mock_status.return_value = ""
         mock_net_stop.side_effect = StopServiceError(
             "test service",
             "test error"
diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
index 826251a..1487eb4 100644
--- a/pcs/test/test_lib_commands_quorum.py
+++ b/pcs/test/test_lib_commands_quorum.py
@@ -6,7 +6,7 @@ from __future__ import (
 )
 
 import logging
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from pcs.test.tools.assertions import (
     assert_raise_library_error,
@@ -17,7 +17,7 @@ from pcs.test.tools.misc import (
     ac,
     get_test_resource as rc,
 )
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 
 from pcs.common import report_codes
 from pcs.lib.env import LibraryEnvironment
@@ -25,6 +25,7 @@ from pcs.lib.errors import (
     LibraryError,
     ReportItemSeverity as severity,
 )
+from pcs.lib.corosync.config_facade import ConfigFacade
 from pcs.lib.external import NodeCommunicationException
 from pcs.lib.node import NodeAddresses, NodeAddressesList
 
@@ -146,23 +147,201 @@ class GetQuorumConfigTest(TestCase, CmanMixin):
         self.assertEqual([], self.mock_reporter.report_item_list)
 
 
+@mock.patch("pcs.lib.sbd.is_auto_tie_breaker_needed")
+class CheckIfAtbCanBeDisabledTest(TestCase):
+    def setUp(self):
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.mock_runner = "cmd_runner"
+        self.mock_corosync_conf = mock.MagicMock(spec_set=ConfigFacade)
+
+    def test_atb_no_need_was_disabled_atb_disabled(self, mock_atb_needed):
+        mock_atb_needed.return_value = False
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_no_need_was_disabled_atb_enabled(self, mock_atb_needed):
+        mock_atb_needed.return_value = False
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_no_need_was_enable_atb_disabled(self, mock_atb_needed):
+        mock_atb_needed.return_value = False
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_no_need_was_enabled_atb_enabled(self, mock_atb_needed):
+        mock_atb_needed.return_value = False
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_needed_was_disabled_atb_disabled(self, mock_atb_needed):
+        mock_atb_needed.return_value = True
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_needed_was_disabled_atb_enabled(self, mock_atb_needed):
+        mock_atb_needed.return_value = True
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, False
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_needed_was_enable_atb_disabled(self, mock_atb_needed):
+        mock_atb_needed.return_value = True
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
+        report_item = (
+            severity.ERROR,
+            report_codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD,
+            {},
+            report_codes.FORCE_OPTIONS
+        )
+        assert_raise_library_error(
+            lambda: lib._check_if_atb_can_be_disabled(
+                self.mock_runner,
+                self.mock_reporter,
+                self.mock_corosync_conf,
+                True
+            ),
+            report_item
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list, [report_item]
+        )
+
+    def test_atb_needed_was_enabled_atb_enabled(self, mock_atb_needed):
+        mock_atb_needed.return_value = True
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+
+    def test_atb_no_need_was_disabled_atb_disabled_force(
+        self, mock_atb_needed
+    ):
+        mock_atb_needed.return_value = False
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf,
+            False, force=True
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_no_need_was_disabled_atb_enabled_force(
+        self, mock_atb_needed
+    ):
+        mock_atb_needed.return_value = False
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf,
+            False, force=True
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_no_need_was_enable_atb_disabled_force(self, mock_atb_needed):
+        mock_atb_needed.return_value = False
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True,
+            force=True
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_no_need_was_enabled_atb_enabled_force(self, mock_atb_needed):
+        mock_atb_needed.return_value = False
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True,
+            force=True
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_needed_was_disabled_atb_disabled_force(
+        self, mock_atb_needed
+    ):
+        mock_atb_needed.return_value = True
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf,
+            False, force=True
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_needed_was_disabled_atb_enabled_force(self, mock_atb_needed):
+        mock_atb_needed.return_value = True
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf,
+            False, force=True
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    def test_atb_needed_was_enable_atb_disabled_force(self, mock_atb_needed):
+        mock_atb_needed.return_value = True
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = False
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True,
+            force=True
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [(
+                severity.WARNING,
+                report_codes.COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD,
+                {},
+                None
+            )]
+        )
+
+    def test_atb_needed_was_enabled_atb_enabled_force(self, mock_atb_needed):
+        mock_atb_needed.return_value = True
+        self.mock_corosync_conf.is_enabled_auto_tie_breaker.return_value = True
+        lib._check_if_atb_can_be_disabled(
+            self.mock_runner, self.mock_reporter, self.mock_corosync_conf, True,
+            force=True
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+
+@mock.patch("pcs.lib.commands.quorum._check_if_atb_can_be_disabled")
 @mock.patch.object(LibraryEnvironment, "push_corosync_conf")
 @mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
+@mock.patch.object(LibraryEnvironment, "cmd_runner")
 class SetQuorumOptionsTest(TestCase, CmanMixin):
     def setUp(self):
         self.mock_logger = mock.MagicMock(logging.Logger)
         self.mock_reporter = MockLibraryReportProcessor()
 
     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
-    def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync):
+    def test_disabled_on_cman(
+        self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check
+    ):
         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
         self.assert_disabled_on_cman(lambda: lib.set_options(lib_env, {}))
         mock_get_corosync.assert_not_called()
         mock_push_corosync.assert_not_called()
+        mock_check.assert_not_called()
 
     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
     def test_enabled_on_cman_if_not_live(
-        self, mock_get_corosync, mock_push_corosync
+        self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check
     ):
         original_conf = "invalid {\nconfig: stop after cman test"
         mock_get_corosync.return_value = original_conf
@@ -182,11 +361,16 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
         )
 
         mock_push_corosync.assert_not_called()
+        mock_check.assert_not_called()
+        mock_runner.assert_not_called()
 
     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-    def test_success(self, mock_get_corosync, mock_push_corosync):
+    def test_success(
+        self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check
+    ):
         original_conf = open(rc("corosync-3nodes.conf")).read()
         mock_get_corosync.return_value = original_conf
+        mock_runner.return_value = "cmd_runner"
         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
 
         new_options = {"wait_for_all": "1"}
@@ -201,9 +385,16 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
             )
         )
         self.assertEqual([], self.mock_reporter.report_item_list)
+        self.assertEqual(1, mock_check.call_count)
+        self.assertEqual("cmd_runner", mock_check.call_args[0][0])
+        self.assertEqual(self.mock_reporter, mock_check.call_args[0][1])
+        self.assertFalse(mock_check.call_args[0][3])
+        self.assertFalse(mock_check.call_args[0][4])
 
     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-    def test_bad_options(self, mock_get_corosync, mock_push_corosync):
+    def test_bad_options(
+        self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check
+    ):
         original_conf = open(rc("corosync.conf")).read()
         mock_get_corosync.return_value = original_conf
         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
@@ -228,9 +419,12 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
         )
 
         mock_push_corosync.assert_not_called()
+        mock_check.assert_not_called()
 
     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-    def test_bad_config(self, mock_get_corosync, mock_push_corosync):
+    def test_bad_config(
+        self, mock_runner, mock_get_corosync, mock_push_corosync, mock_check
+    ):
         original_conf = "invalid {\nconfig: this is"
         mock_get_corosync.return_value = original_conf
         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
@@ -246,6 +440,7 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
         )
 
         mock_push_corosync.assert_not_called()
+        mock_check.assert_not_called()
 
 
 @mock.patch("pcs.lib.commands.quorum.corosync_live.get_quorum_status_text")
@@ -1384,10 +1579,14 @@ class RemoveDeviceTest(TestCase, CmanMixin):
         mock_remote_stop.assert_not_called()
 
     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-    def test_success(
+    @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True)
+    @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True)
+    def test_success_3nodes_sbd(
         self, mock_remote_stop, mock_remote_disable, mock_remove_net,
         mock_get_corosync, mock_push_corosync
     ):
+        # nothing special needs to be done in regards of SBD if a cluster
+        # consists of odd number of nodes
         original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
         no_device_conf = open(rc("corosync-3nodes.conf")).read()
         mock_get_corosync.return_value = original_conf
@@ -1424,10 +1623,106 @@ class RemoveDeviceTest(TestCase, CmanMixin):
         self.assertEqual(3, len(mock_remote_stop.mock_calls))
 
     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-    def test_success_file(
+    @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: False)
+    @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: False)
+    def test_success_2nodes_no_sbd(
         self, mock_remote_stop, mock_remote_disable, mock_remove_net,
         mock_get_corosync, mock_push_corosync
     ):
+        # cluster consists of two nodes, two_node must be set
+        original_conf = open(rc("corosync-qdevice.conf")).read()
+        no_device_conf = open(rc("corosync.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        lib.remove_device(lib_env)
+
+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
+        ac(
+            mock_push_corosync.mock_calls[0][1][0].config.export(),
+            no_device_conf
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_DISABLE_STARTED,
+                    {
+                        "service": "corosync-qdevice",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_STARTED,
+                    {
+                        "service": "corosync-qdevice",
+                    }
+                ),
+            ]
+        )
+        self.assertEqual(1, len(mock_remove_net.mock_calls))
+        self.assertEqual(2, len(mock_remote_disable.mock_calls))
+        self.assertEqual(2, len(mock_remote_stop.mock_calls))
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    @mock.patch("pcs.lib.sbd.is_sbd_installed", lambda self: True)
+    @mock.patch("pcs.lib.sbd.is_sbd_enabled", lambda self: True)
+    def test_success_2nodes_sbd(
+        self, mock_remote_stop, mock_remote_disable, mock_remove_net,
+        mock_get_corosync, mock_push_corosync
+    ):
+        # cluster consists of two nodes, but SBD is in use
+        # auto tie breaker must be enabled
+        original_conf = open(rc("corosync-qdevice.conf")).read()
+        no_device_conf = open(rc("corosync.conf")).read().replace(
+            "two_node: 1",
+            "auto_tie_breaker: 1"
+        )
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        lib.remove_device(lib_env)
+
+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
+        ac(
+            mock_push_corosync.mock_calls[0][1][0].config.export(),
+            no_device_conf
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.WARNING,
+                    report_codes.SBD_REQUIRES_ATB,
+                    {}
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_DISABLE_STARTED,
+                    {
+                        "service": "corosync-qdevice",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_STARTED,
+                    {
+                        "service": "corosync-qdevice",
+                    }
+                ),
+            ]
+        )
+        self.assertEqual(1, len(mock_remove_net.mock_calls))
+        self.assertEqual(2, len(mock_remote_disable.mock_calls))
+        self.assertEqual(2, len(mock_remote_stop.mock_calls))
+
+    @mock.patch("pcs.lib.sbd.atb_has_to_be_enabled")
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_success_file(
+        self, mock_atb_check, mock_remote_stop, mock_remote_disable,
+        mock_remove_net, mock_get_corosync, mock_push_corosync
+    ):
         original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
         no_device_conf = open(rc("corosync-3nodes.conf")).read()
         mock_get_corosync.return_value = original_conf
@@ -1448,6 +1743,7 @@ class RemoveDeviceTest(TestCase, CmanMixin):
         mock_remove_net.assert_not_called()
         mock_remote_disable.assert_not_called()
         mock_remote_stop.assert_not_called()
+        mock_atb_check.assert_not_called()
 
 
 @mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_destroy")
diff --git a/pcs/test/test_lib_commands_sbd.py b/pcs/test/test_lib_commands_sbd.py
index 9a96757..f8146ce 100644
--- a/pcs/test/test_lib_commands_sbd.py
+++ b/pcs/test/test_lib_commands_sbd.py
@@ -7,9 +7,9 @@ from __future__ import (
 
 import logging
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.assertions import (
     assert_raise_library_error,
     assert_report_item_list_equal,
@@ -35,6 +35,15 @@ from pcs.lib.external import (
 import pcs.lib.commands.sbd as cmd_sbd
 
 
+def _assert_equal_list_of_dictionaries_without_order(expected, actual):
+    for item in actual:
+        if item not in expected:
+            raise AssertionError("Given but not expected: {0}".format(item))
+    for item in expected:
+        if item not in actual:
+            raise AssertionError("Expected but not given: {0}".format(item))
+
+
 class CommandSbdTest(TestCase):
     def setUp(self):
         self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
@@ -234,7 +243,8 @@ class ValidateSbdOptionsTest(TestCase):
             "SBD_STARTMODE": "clean",
             "SBD_WATCHDOG_DEV": "/dev/watchdog",
             "SBD_UNKNOWN": "",
-            "SBD_OPTS": "  "
+            "SBD_OPTS": "  ",
+            "SBD_PACEMAKER": "false",
         }
 
         assert_report_item_list_equal(
@@ -272,6 +282,90 @@ class ValidateSbdOptionsTest(TestCase):
                         "allowed_str": self.allowed_sbd_options_str
                     },
                     None
+                ),
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "SBD_PACEMAKER",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    None
+                )
+            ]
+        )
+
+    def test_watchdog_timeout_is_not_present(self):
+        config = {
+            "SBD_DELAY_START": "yes",
+            "SBD_STARTMODE": "clean"
+        }
+        self.assertEqual([], cmd_sbd._validate_sbd_options(config))
+
+    def test_watchdog_timeout_is_nonnegative_int(self):
+        config = {
+            "SBD_WATCHDOG_TIMEOUT": "-1",
+        }
+
+        assert_report_item_list_equal(
+            cmd_sbd._validate_sbd_options(config),
+            [
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "SBD_WATCHDOG_TIMEOUT",
+                        "option_value": "-1",
+                        "allowed_values": "nonnegative integer",
+                        "allowed_values_str": "nonnegative integer",
+                    },
+                    None
+                )
+            ]
+        )
+
+    def test_watchdog_timeout_is_not_int(self):
+        config = {
+            "SBD_WATCHDOG_TIMEOUT": "not int",
+        }
+
+        assert_report_item_list_equal(
+            cmd_sbd._validate_sbd_options(config),
+            [
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "SBD_WATCHDOG_TIMEOUT",
+                        "option_value": "not int",
+                        "allowed_values": "nonnegative integer",
+                        "allowed_values_str": "nonnegative integer",
+                    },
+                    None
+                )
+            ]
+        )
+
+    def test_watchdog_timeout_is_none(self):
+        config = {
+            "SBD_WATCHDOG_TIMEOUT": None,
+        }
+
+        assert_report_item_list_equal(
+            cmd_sbd._validate_sbd_options(config),
+            [
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "SBD_WATCHDOG_TIMEOUT",
+                        "option_value": None,
+                        "allowed_values": "nonnegative integer",
+                        "allowed_values_str": "nonnegative integer",
+                    },
+                    None
                 )
             ]
         )
@@ -325,6 +419,35 @@ class GetFullWatchdogListTest(TestCase):
             )
         )
 
+    def test_invalid_watchdogs(self):
+        watchdog_dict = {
+            self.node_list[1].label: "",
+            self.node_list[2].label: None,
+            self.node_list[3].label: "not/abs/path",
+            self.node_list[4].label: "/dev/watchdog"
+
+        }
+        assert_raise_library_error(
+            lambda: cmd_sbd._get_full_watchdog_list(
+                self.node_list, "/dev/dog", watchdog_dict
+            ),
+            (
+                Severities.ERROR,
+                report_codes.WATCHDOG_INVALID,
+                {"watchdog": ""}
+            ),
+            (
+                Severities.ERROR,
+                report_codes.WATCHDOG_INVALID,
+                {"watchdog": None}
+            ),
+            (
+                Severities.ERROR,
+                report_codes.WATCHDOG_INVALID,
+                {"watchdog": "not/abs/path"}
+            )
+        )
+
 
 @mock.patch("pcs.lib.commands.sbd._get_cluster_nodes")
 @mock.patch("pcs.lib.sbd.check_sbd")
@@ -393,8 +516,7 @@ class GetClusterSbdStatusTest(CommandSbdTest):
                 }
             }
         ]
-
-        self.assertEqual(
+        _assert_equal_list_of_dictionaries_without_order(
             expected, cmd_sbd.get_cluster_sbd_status(self.mock_env)
         )
         mock_get_nodes.assert_called_once_with(self.mock_env)
@@ -447,7 +569,7 @@ class GetClusterSbdStatusTest(CommandSbdTest):
             }
         ]
 
-        self.assertEqual(
+        _assert_equal_list_of_dictionaries_without_order(
             expected, cmd_sbd.get_cluster_sbd_status(self.mock_env)
         )
         mock_get_nodes.assert_called_once_with(self.mock_env)
@@ -538,7 +660,7 @@ OPTION=   value
             }
         ]
 
-        self.assertEqual(
+        _assert_equal_list_of_dictionaries_without_order(
             expected, cmd_sbd.get_cluster_sbd_config(self.mock_env)
         )
         mock_get_nodes.assert_called_once_with(self.mock_env)
@@ -589,7 +711,7 @@ invalid value
             }
         ]
 
-        self.assertEqual(
+        _assert_equal_list_of_dictionaries_without_order(
             expected, cmd_sbd.get_cluster_sbd_config(self.mock_env)
         )
         mock_get_nodes.assert_called_once_with(self.mock_env)
diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py
index 4a35fd9..4373d65 100644
--- a/pcs/test/test_lib_corosync_config_facade.py
+++ b/pcs/test/test_lib_corosync_config_facade.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 import re
 
 from pcs.test.tools.assertions import (
@@ -281,6 +281,34 @@ quorum {
         self.assertFalse(facade.need_qdevice_reload)
 
 
+class IsEnabledAutoTieBreaker(TestCase):
+    def test_enabled(self):
+        config = """\
+quorum {
+    auto_tie_breaker: 1
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertTrue(facade.is_enabled_auto_tie_breaker())
+
+    def test_disabled(self):
+        config = """\
+quorum {
+    auto_tie_breaker: 0
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertFalse(facade.is_enabled_auto_tie_breaker())
+
+    def test_no_value(self):
+        config = """\
+quorum {
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertFalse(facade.is_enabled_auto_tie_breaker())
+
+
 class SetQuorumOptionsTest(TestCase):
     def get_two_node(self, facade):
         two_node = None
diff --git a/pcs/test/test_lib_corosync_config_parser.py b/pcs/test/test_lib_corosync_config_parser.py
index da20889..a68710b 100644
--- a/pcs/test/test_lib_corosync_config_parser.py
+++ b/pcs/test/test_lib_corosync_config_parser.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-import unittest
+from pcs.test.tools import pcs_unittest as unittest
 
 from pcs.test.tools.misc import ac
 
diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py
index 0fc5eb2..f03d78b 100644
--- a/pcs/test/test_lib_corosync_live.py
+++ b/pcs/test/test_lib_corosync_live.py
@@ -5,13 +5,13 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 import os.path
 
 from pcs.test.tools.assertions import assert_raise_library_error
 from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 
 from pcs import settings
 from pcs.common import report_codes
@@ -69,9 +69,10 @@ class ReloadConfigTest(TestCase):
 
     def test_success(self):
         cmd_retval = 0
-        cmd_output = "cmd output"
+        cmd_stdout = "cmd output"
+        cmd_stderr = ""
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (cmd_output, cmd_retval)
+        mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval)
 
         lib.reload_config(mock_runner)
 
@@ -81,9 +82,10 @@ class ReloadConfigTest(TestCase):
 
     def test_error(self):
         cmd_retval = 1
-        cmd_output = "cmd output"
+        cmd_stdout = "cmd output"
+        cmd_stderr = "cmd error"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (cmd_output, cmd_retval)
+        mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval)
 
         assert_raise_library_error(
             lambda: lib.reload_config(mock_runner),
@@ -91,7 +93,7 @@ class ReloadConfigTest(TestCase):
                 severity.ERROR,
                 report_codes.COROSYNC_CONFIG_RELOAD_ERROR,
                 {
-                    "reason": cmd_output,
+                    "reason": "\n".join([cmd_stderr, cmd_stdout]),
                 }
             )
         )
@@ -107,7 +109,7 @@ class GetQuorumStatusTextTest(TestCase):
         self.quorum_tool = "/usr/sbin/corosync-quorumtool"
 
     def test_success(self):
-        self.mock_runner.run.return_value = ("status info", 0)
+        self.mock_runner.run.return_value = ("status info", "", 0)
         self.assertEqual(
             "status info",
             lib.get_quorum_status_text(self.mock_runner)
@@ -117,7 +119,7 @@ class GetQuorumStatusTextTest(TestCase):
         ])
 
     def test_success_with_retval_1(self):
-        self.mock_runner.run.return_value = ("status info", 1)
+        self.mock_runner.run.return_value = ("status info", "", 1)
         self.assertEqual(
             "status info",
             lib.get_quorum_status_text(self.mock_runner)
@@ -127,7 +129,7 @@ class GetQuorumStatusTextTest(TestCase):
         ])
 
     def test_error(self):
-        self.mock_runner.run.return_value = ("status error", 2)
+        self.mock_runner.run.return_value = ("some info", "status error", 2)
         assert_raise_library_error(
             lambda: lib.get_quorum_status_text(self.mock_runner),
             (
@@ -152,9 +154,10 @@ class SetExpectedVotesTest(TestCase):
 
     def test_success(self):
         cmd_retval = 0
-        cmd_output = "cmd output"
+        cmd_stdout = "cmd output"
+        cmd_stderr = ""
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (cmd_output, cmd_retval)
+        mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval)
 
         lib.set_expected_votes(mock_runner, 3)
 
@@ -164,9 +167,10 @@ class SetExpectedVotesTest(TestCase):
 
     def test_error(self):
         cmd_retval = 1
-        cmd_output = "cmd output"
+        cmd_stdout = "cmd output"
+        cmd_stderr = "cmd stderr"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (cmd_output, cmd_retval)
+        mock_runner.run.return_value = (cmd_stdout, cmd_stderr, cmd_retval)
 
         assert_raise_library_error(
             lambda: lib.set_expected_votes(mock_runner, 3),
@@ -174,7 +178,7 @@ class SetExpectedVotesTest(TestCase):
                 severity.ERROR,
                 report_codes.COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR,
                 {
-                    "reason": cmd_output,
+                    "reason": cmd_stderr,
                 }
             )
         )
diff --git a/pcs/test/test_lib_corosync_qdevice_client.py b/pcs/test/test_lib_corosync_qdevice_client.py
index e0332f1..8c32c36 100644
--- a/pcs/test/test_lib_corosync_qdevice_client.py
+++ b/pcs/test/test_lib_corosync_qdevice_client.py
@@ -5,9 +5,9 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.assertions import assert_raise_library_error
 
 from pcs.common import report_codes
@@ -23,7 +23,7 @@ class GetStatusTextTest(TestCase):
         self.qdevice_tool = "/usr/sbin/corosync-qdevice-tool"
 
     def test_success(self):
-        self.mock_runner.run.return_value = ("status info", 0)
+        self.mock_runner.run.return_value = ("status info", "", 0)
         self.assertEqual(
             "status info",
             lib.get_status_text(self.mock_runner)
@@ -33,7 +33,7 @@ class GetStatusTextTest(TestCase):
         ])
 
     def test_success_verbose(self):
-        self.mock_runner.run.return_value = ("status info", 0)
+        self.mock_runner.run.return_value = ("status info", "", 0)
         self.assertEqual(
             "status info",
             lib.get_status_text(self.mock_runner, True)
@@ -43,14 +43,14 @@ class GetStatusTextTest(TestCase):
         ])
 
     def test_error(self):
-        self.mock_runner.run.return_value = ("status error", 1)
+        self.mock_runner.run.return_value = ("some info", "status error", 1)
         assert_raise_library_error(
             lambda: lib.get_status_text(self.mock_runner),
             (
                 severity.ERROR,
                 report_codes.COROSYNC_QUORUM_GET_STATUS_ERROR,
                 {
-                    "reason": "status error",
+                    "reason": "status error\nsome info",
                 }
             )
         )
diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py
index 3d473f7..21c526b 100644
--- a/pcs/test/test_lib_corosync_qdevice_net.py
+++ b/pcs/test/test_lib_corosync_qdevice_net.py
@@ -5,12 +5,12 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 import base64
 import os.path
 
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.assertions import assert_raise_library_error
 from pcs.test.tools.misc import get_test_resource
 
@@ -49,7 +49,7 @@ class QdeviceSetupTest(TestCase):
 
     def test_success(self, mock_is_dir_nonempty):
         mock_is_dir_nonempty.return_value = False
-        self.mock_runner.run.return_value = ("initialized", 0)
+        self.mock_runner.run.return_value = ("initialized", "", 0)
 
         lib.qdevice_setup(self.mock_runner)
 
@@ -73,7 +73,7 @@ class QdeviceSetupTest(TestCase):
 
     def test_init_tool_fail(self, mock_is_dir_nonempty):
         mock_is_dir_nonempty.return_value = False
-        self.mock_runner.run.return_value = ("test error", 1)
+        self.mock_runner.run.return_value = ("stdout", "test error", 1)
 
         assert_raise_library_error(
             lambda: lib.qdevice_setup(self.mock_runner),
@@ -82,7 +82,7 @@ class QdeviceSetupTest(TestCase):
                 report_codes.QDEVICE_INITIALIZATION_ERROR,
                 {
                     "model": "net",
-                    "reason": "test error",
+                    "reason": "test error\nstdout",
                 }
             )
         )
@@ -126,7 +126,7 @@ class QdeviceStatusGenericTest(TestCase):
         self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
 
     def test_success(self):
-        self.mock_runner.run.return_value = ("status info", 0)
+        self.mock_runner.run.return_value = ("status info", "", 0)
         self.assertEqual(
             "status info",
             lib.qdevice_status_generic_text(self.mock_runner)
@@ -134,7 +134,7 @@ class QdeviceStatusGenericTest(TestCase):
         self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s"])
 
     def test_success_verbose(self):
-        self.mock_runner.run.return_value = ("status info", 0)
+        self.mock_runner.run.return_value = ("status info", "", 0)
         self.assertEqual(
             "status info",
             lib.qdevice_status_generic_text(self.mock_runner, True)
@@ -142,7 +142,7 @@ class QdeviceStatusGenericTest(TestCase):
         self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-s", "-v"])
 
     def test_error(self):
-        self.mock_runner.run.return_value = ("status error", 1)
+        self.mock_runner.run.return_value = ("some info", "status error", 1)
         assert_raise_library_error(
             lambda: lib.qdevice_status_generic_text(self.mock_runner),
             (
@@ -150,7 +150,7 @@ class QdeviceStatusGenericTest(TestCase):
                 report_codes.QDEVICE_GET_STATUS_ERROR,
                 {
                     "model": "net",
-                    "reason": "status error",
+                    "reason": "status error\nsome info",
                 }
             )
         )
@@ -162,7 +162,7 @@ class QdeviceStatusClusterTest(TestCase):
         self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
 
     def test_success(self):
-        self.mock_runner.run.return_value = ("status info", 0)
+        self.mock_runner.run.return_value = ("status info", "", 0)
         self.assertEqual(
             "status info",
             lib.qdevice_status_cluster_text(self.mock_runner)
@@ -170,7 +170,7 @@ class QdeviceStatusClusterTest(TestCase):
         self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"])
 
     def test_success_verbose(self):
-        self.mock_runner.run.return_value = ("status info", 0)
+        self.mock_runner.run.return_value = ("status info", "", 0)
         self.assertEqual(
             "status info",
             lib.qdevice_status_cluster_text(self.mock_runner, verbose=True)
@@ -178,7 +178,7 @@ class QdeviceStatusClusterTest(TestCase):
         self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l", "-v"])
 
     def test_success_cluster(self):
-        self.mock_runner.run.return_value = ("status info", 0)
+        self.mock_runner.run.return_value = ("status info", "", 0)
         self.assertEqual(
             "status info",
             lib.qdevice_status_cluster_text(self.mock_runner, "cluster")
@@ -188,7 +188,7 @@ class QdeviceStatusClusterTest(TestCase):
         ])
 
     def test_success_cluster_verbose(self):
-        self.mock_runner.run.return_value = ("status info", 0)
+        self.mock_runner.run.return_value = ("status info", "", 0)
         self.assertEqual(
             "status info",
             lib.qdevice_status_cluster_text(self.mock_runner, "cluster", True)
@@ -198,7 +198,7 @@ class QdeviceStatusClusterTest(TestCase):
         ])
 
     def test_error(self):
-        self.mock_runner.run.return_value = ("status error", 1)
+        self.mock_runner.run.return_value = ("some info", "status error", 1)
         assert_raise_library_error(
             lambda: lib.qdevice_status_cluster_text(self.mock_runner),
             (
@@ -206,13 +206,63 @@ class QdeviceStatusClusterTest(TestCase):
                 report_codes.QDEVICE_GET_STATUS_ERROR,
                 {
                     "model": "net",
-                    "reason": "status error",
+                    "reason": "status error\nsome info",
                 }
             )
         )
         self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-l"])
 
 
+class QdeviceConnectedClustersTest(TestCase):
+    def test_empty_status(self):
+        status = ""
+        self.assertEqual(
+            [],
+            lib.qdevice_connected_clusters(status)
+        )
+
+    def test_one_cluster(self):
+        status = """\
+Cluster "rhel72":
+    Algorithm:          LMS
+    Tie-breaker:        Node with lowest node ID
+    Node ID 2:
+        Client address:         ::ffff:192.168.122.122:59738
+        Configured node list:   1, 2
+        Membership node list:   1, 2
+        Vote:                   ACK (ACK)
+    Node ID 1:
+        Client address:         ::ffff:192.168.122.121:43420
+        Configured node list:   1, 2
+        Membership node list:   1, 2
+        Vote:                   ACK (ACK)
+"""
+        self.assertEqual(
+            ["rhel72"],
+            lib.qdevice_connected_clusters(status)
+        )
+
+    def test_more_clusters(self):
+        status = """\
+Cluster "rhel72":
+Cluster "rhel73":
+"""
+        self.assertEqual(
+            ["rhel72", "rhel73"],
+            lib.qdevice_connected_clusters(status)
+        )
+
+    def test_invalid_status(self):
+        status = """\
+Cluster:
+    Cluster "rhel72":
+"""
+        self.assertEqual(
+            [],
+            lib.qdevice_connected_clusters(status)
+        )
+
+
 @mock.patch("pcs.lib.corosync.qdevice_net._get_output_certificate")
 @mock.patch("pcs.lib.corosync.qdevice_net._store_to_tmpfile")
 class QdeviceSignCertificateRequestTest(CertificateTestCase):
@@ -222,7 +272,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase):
     )
     def test_success(self, mock_tmp_store, mock_get_cert):
         mock_tmp_store.return_value = self.mock_tmpfile
-        self.mock_runner.run.return_value = ("tool output", 0)
+        self.mock_runner.run.return_value = ("tool output", "", 0)
         mock_get_cert.return_value = "new certificate".encode("utf-8")
 
         result = lib.qdevice_sign_certificate_request(
@@ -293,7 +343,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase):
     )
     def test_sign_error(self, mock_tmp_store, mock_get_cert):
         mock_tmp_store.return_value = self.mock_tmpfile
-        self.mock_runner.run.return_value = ("tool output error", 1)
+        self.mock_runner.run.return_value = ("stdout", "tool output error", 1)
 
         assert_raise_library_error(
             lambda: lib.qdevice_sign_certificate_request(
@@ -305,7 +355,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase):
                 severity.ERROR,
                 report_codes.QDEVICE_CERTIFICATE_SIGN_ERROR,
                 {
-                    "reason": "tool output error",
+                    "reason": "tool output error\nstdout",
                 }
             )
         )
@@ -326,7 +376,7 @@ class QdeviceSignCertificateRequestTest(CertificateTestCase):
     )
     def test_output_read_error(self, mock_tmp_store, mock_get_cert):
         mock_tmp_store.return_value = self.mock_tmpfile
-        self.mock_runner.run.return_value = ("tool output", 0)
+        self.mock_runner.run.return_value = ("tool output", "", 0)
         mock_get_cert.side_effect = LibraryError
 
         self.assertRaises(
@@ -399,7 +449,7 @@ class ClientSetupTest(TestCase):
 
     @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy")
     def test_success(self, mock_destroy):
-        self.mock_runner.run.return_value = ("tool output", 0)
+        self.mock_runner.run.return_value = ("tool output", "", 0)
 
         lib.client_setup(self.mock_runner, "certificate data".encode("utf-8"))
 
@@ -414,7 +464,7 @@ class ClientSetupTest(TestCase):
 
     @mock.patch("pcs.lib.corosync.qdevice_net.client_destroy")
     def test_init_error(self, mock_destroy):
-        self.mock_runner.run.return_value = ("tool output error", 1)
+        self.mock_runner.run.return_value = ("stdout", "tool output error", 1)
 
         assert_raise_library_error(
             lambda: lib.client_setup(
@@ -426,7 +476,7 @@ class ClientSetupTest(TestCase):
                 report_codes.QDEVICE_INITIALIZATION_ERROR,
                 {
                     "model": "net",
-                    "reason": "tool output error",
+                    "reason": "tool output error\nstdout",
                 }
             )
         )
@@ -448,7 +498,7 @@ class ClientGenerateCertificateRequestTest(CertificateTestCase):
         lambda: True
     )
     def test_success(self, mock_get_cert):
-        self.mock_runner.run.return_value = ("tool output", 0)
+        self.mock_runner.run.return_value = ("tool output", "", 0)
         mock_get_cert.return_value = "new certificate".encode("utf-8")
 
         result = lib.client_generate_certificate_request(
@@ -492,7 +542,7 @@ class ClientGenerateCertificateRequestTest(CertificateTestCase):
         lambda: True
     )
     def test_tool_error(self, mock_get_cert):
-        self.mock_runner.run.return_value = ("tool output error", 1)
+        self.mock_runner.run.return_value = ("stdout", "tool output error", 1)
 
         assert_raise_library_error(
             lambda: lib.client_generate_certificate_request(
@@ -504,7 +554,7 @@ class ClientGenerateCertificateRequestTest(CertificateTestCase):
                 report_codes.QDEVICE_INITIALIZATION_ERROR,
                 {
                     "model": "net",
-                    "reason": "tool output error",
+                    "reason": "tool output error\nstdout",
                 }
             )
         )
@@ -523,7 +573,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase):
     )
     def test_success(self, mock_tmp_store, mock_get_cert):
         mock_tmp_store.return_value = self.mock_tmpfile
-        self.mock_runner.run.return_value = ("tool output", 0)
+        self.mock_runner.run.return_value = ("tool output", "", 0)
         mock_get_cert.return_value = "new certificate".encode("utf-8")
 
         result = lib.client_cert_request_to_pk12(
@@ -594,7 +644,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase):
     )
     def test_transform_error(self, mock_tmp_store, mock_get_cert):
         mock_tmp_store.return_value = self.mock_tmpfile
-        self.mock_runner.run.return_value = ("tool output error", 1)
+        self.mock_runner.run.return_value = ("stdout", "tool output error", 1)
 
         assert_raise_library_error(
             lambda: lib.client_cert_request_to_pk12(
@@ -605,7 +655,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase):
                 severity.ERROR,
                 report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
                 {
-                    "reason": "tool output error",
+                    "reason": "tool output error\nstdout",
                 }
             )
         )
@@ -625,7 +675,7 @@ class ClientCertRequestToPk12Test(CertificateTestCase):
     )
     def test_output_read_error(self, mock_tmp_store, mock_get_cert):
         mock_tmp_store.return_value = self.mock_tmpfile
-        self.mock_runner.run.return_value = ("tool output", 0)
+        self.mock_runner.run.return_value = ("tool output", "", 0)
         mock_get_cert.side_effect = LibraryError
 
         self.assertRaises(
@@ -657,7 +707,7 @@ class ClientImportCertificateAndKeyTest(CertificateTestCase):
     )
     def test_success(self, mock_tmp_store):
         mock_tmp_store.return_value = self.mock_tmpfile
-        self.mock_runner.run.return_value = ("tool output", 0)
+        self.mock_runner.run.return_value = ("tool output", "", 0)
 
         lib.client_import_certificate_and_key(
             self.mock_runner,
@@ -721,7 +771,7 @@ class ClientImportCertificateAndKeyTest(CertificateTestCase):
     )
     def test_import_error(self, mock_tmp_store):
         mock_tmp_store.return_value = self.mock_tmpfile
-        self.mock_runner.run.return_value = ("tool output error", 1)
+        self.mock_runner.run.return_value = ("stdout", "tool output error", 1)
 
         assert_raise_library_error(
             lambda: lib.client_import_certificate_and_key(
@@ -732,7 +782,7 @@ class ClientImportCertificateAndKeyTest(CertificateTestCase):
                 severity.ERROR,
                 report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
                 {
-                    "reason": "tool output error",
+                    "reason": "tool output error\nstdout",
                 }
             )
         )
diff --git a/pcs/test/test_lib_env.py b/pcs/test/test_lib_env.py
index c6322b7..205fd60 100644
--- a/pcs/test/test_lib_env.py
+++ b/pcs/test/test_lib_env.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 import logging
 from lxml import etree
 
@@ -16,7 +16,7 @@ from pcs.test.tools.assertions import (
 )
 from pcs.test.tools.custom_mock import MockLibraryReportProcessor
 from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 
 from pcs.lib.env import LibraryEnvironment
 from pcs.common import report_codes
diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
index 929a50d..6f60d7c 100644
--- a/pcs/test/test_lib_external.py
+++ b/pcs/test/test_lib_external.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 import os.path
 import logging
 try:
@@ -27,7 +27,7 @@ from pcs.test.tools.assertions import (
     assert_report_item_list_equal,
 )
 from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 
 from pcs import settings
 from pcs.common import report_codes
@@ -57,19 +57,23 @@ class CommandRunnerTest(TestCase):
         self.assertEqual(filtered_kwargs, kwargs)
 
     def test_basic(self, mock_popen):
-        expected_output = "expected output"
+        expected_stdout = "expected stdout"
+        expected_stderr = "expected stderr"
         expected_retval = 123
         command = ["a_command"]
         command_str = "a_command"
         mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
-        mock_process.communicate.return_value = (expected_output, "dummy")
+        mock_process.communicate.return_value = (
+            expected_stdout, expected_stderr
+        )
         mock_process.returncode = expected_retval
         mock_popen.return_value = mock_process
 
         runner = lib.CommandRunner(self.mock_logger, self.mock_reporter)
-        real_output, real_retval = runner.run(command)
+        real_stdout, real_stderr, real_retval = runner.run(command)
 
-        self.assertEqual(real_output, expected_output)
+        self.assertEqual(real_stdout, expected_stdout)
+        self.assertEqual(real_stderr, expected_stderr)
         self.assertEqual(real_retval, expected_retval)
         mock_process.communicate.assert_called_once_with(None)
         self.assert_popen_called_with(
@@ -82,9 +86,14 @@ class CommandRunnerTest(TestCase):
             mock.call("""\
 Finished running: {0}
 Return value: {1}
---Debug Output Start--
+--Debug Stdout Start--
 {2}
---Debug Output End--""".format(command_str, expected_retval, expected_output))
+--Debug Stdout End--
+--Debug Stderr Start--
+{3}
+--Debug Stderr End--""".format(
+                command_str, expected_retval, expected_stdout, expected_stderr
+            ))
         ]
         self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
         self.mock_logger.debug.assert_has_calls(logger_calls)
@@ -105,19 +114,23 @@ Return value: {1}
                     {
                         "command": command_str,
                         "return_value": expected_retval,
-                        "stdout": expected_output,
+                        "stdout": expected_stdout,
+                        "stderr": expected_stderr,
                     }
                 )
             ]
         )
 
     def test_env(self, mock_popen):
-        expected_output = "expected output"
+        expected_stdout = "expected output"
+        expected_stderr = "expected stderr"
         expected_retval = 123
         command = ["a_command"]
         command_str = "a_command"
         mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
-        mock_process.communicate.return_value = (expected_output, "dummy")
+        mock_process.communicate.return_value = (
+            expected_stdout, expected_stderr
+        )
         mock_process.returncode = expected_retval
         mock_popen.return_value = mock_process
 
@@ -126,12 +139,13 @@ Return value: {1}
             self.mock_reporter,
             {"a": "a", "b": "b"}
         )
-        real_output, real_retval = runner.run(
+        real_stdout, real_stderr, real_retval = runner.run(
             command,
             env_extend={"b": "B", "c": "C"}
         )
 
-        self.assertEqual(real_output, expected_output)
+        self.assertEqual(real_stdout, expected_stdout)
+        self.assertEqual(real_stderr, expected_stderr)
         self.assertEqual(real_retval, expected_retval)
         mock_process.communicate.assert_called_once_with(None)
         self.assert_popen_called_with(
@@ -144,9 +158,14 @@ Return value: {1}
             mock.call("""\
 Finished running: {0}
 Return value: {1}
---Debug Output Start--
+--Debug Stdout Start--
 {2}
---Debug Output End--""".format(command_str, expected_retval, expected_output))
+--Debug Stdout End--
+--Debug Stderr Start--
+{3}
+--Debug Stderr End--""".format(
+                command_str, expected_retval, expected_stdout, expected_stderr
+            ))
         ]
         self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
         self.mock_logger.debug.assert_has_calls(logger_calls)
@@ -167,27 +186,34 @@ Return value: {1}
                     {
                         "command": command_str,
                         "return_value": expected_retval,
-                        "stdout": expected_output,
+                        "stdout": expected_stdout,
+                        "stderr": expected_stderr,
                     }
                 )
             ]
         )
 
     def test_stdin(self, mock_popen):
-        expected_output = "expected output"
+        expected_stdout = "expected output"
+        expected_stderr = "expected stderr"
         expected_retval = 123
         command = ["a_command"]
         command_str = "a_command"
         stdin = "stdin string"
         mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
-        mock_process.communicate.return_value = (expected_output, "dummy")
+        mock_process.communicate.return_value = (
+            expected_stdout, expected_stderr
+        )
         mock_process.returncode = expected_retval
         mock_popen.return_value = mock_process
 
         runner = lib.CommandRunner(self.mock_logger, self.mock_reporter)
-        real_output, real_retval = runner.run(command, stdin_string=stdin)
+        real_stdout, real_stderr, real_retval = runner.run(
+            command, stdin_string=stdin
+        )
 
-        self.assertEqual(real_output, expected_output)
+        self.assertEqual(real_stdout, expected_stdout)
+        self.assertEqual(real_stderr, expected_stderr)
         self.assertEqual(real_retval, expected_retval)
         mock_process.communicate.assert_called_once_with(stdin)
         self.assert_popen_called_with(
@@ -204,9 +230,14 @@ Running: {0}
             mock.call("""\
 Finished running: {0}
 Return value: {1}
---Debug Output Start--
+--Debug Stdout Start--
 {2}
---Debug Output End--""".format(command_str, expected_retval, expected_output))
+--Debug Stdout End--
+--Debug Stderr Start--
+{3}
+--Debug Stderr End--""".format(
+                command_str, expected_retval, expected_stdout, expected_stderr
+            ))
         ]
         self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
         self.mock_logger.debug.assert_has_calls(logger_calls)
@@ -227,7 +258,8 @@ Return value: {1}
                     {
                         "command": command_str,
                         "return_value": expected_retval,
-                        "stdout": expected_output,
+                        "stdout": expected_stdout,
+                        "stderr": expected_stderr,
                     }
                 )
             ]
@@ -862,7 +894,7 @@ class ParallelCommunicationHelperTest(TestCase):
             mock.call(2, a=4),
         ]
         self.assertEqual(len(expected_calls), len(func.mock_calls))
-        func.assert_has_calls(expected_calls)
+        func.assert_has_calls(expected_calls, any_order=True)
         self.assertEqual(self.mock_reporter.report_item_list, [])
 
     def test_errors(self):
@@ -957,7 +989,7 @@ class ParallelCommunicationHelperTest(TestCase):
 class IsCmanClusterTest(TestCase):
     def template_test(self, is_cman, corosync_output, corosync_retval=0):
         mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
-        mock_runner.run.return_value = (corosync_output, corosync_retval)
+        mock_runner.run.return_value = (corosync_output, "", corosync_retval)
         self.assertEqual(is_cman, lib.is_cman_cluster(mock_runner))
         mock_runner.run.assert_called_once_with([
             os.path.join(settings.corosync_binaries, "corosync"),
@@ -1012,22 +1044,25 @@ Copyright (c) 2006-2009 Red Hat, Inc.
 
 
 @mock.patch("pcs.lib.external.is_systemctl")
+ at mock.patch("pcs.lib.external.is_service_installed")
 class DisableServiceTest(TestCase):
     def setUp(self):
         self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
         self.service = "service_name"
 
-    def test_systemctl(self, mock_systemctl):
+    def test_systemctl(self, mock_is_installed, mock_systemctl):
+        mock_is_installed.return_value = True
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("", "Removed symlink", 0)
         lib.disable_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
             ["systemctl", "disable", self.service + ".service"]
         )
 
-    def test_systemctl_failed(self, mock_systemctl):
+    def test_systemctl_failed(self, mock_is_installed, mock_systemctl):
+        mock_is_installed.return_value = True
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("", 1)
+        self.mock_runner.run.return_value = ("", "Failed", 1)
         self.assertRaises(
             lib.DisableServiceError,
             lambda: lib.disable_service(self.mock_runner, self.service)
@@ -1036,21 +1071,19 @@ class DisableServiceTest(TestCase):
             ["systemctl", "disable", self.service + ".service"]
         )
 
-    @mock.patch("pcs.lib.external.is_service_installed")
     def test_not_systemctl(self, mock_is_installed, mock_systemctl):
         mock_is_installed.return_value = True
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("", "", 0)
         lib.disable_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
             ["chkconfig", self.service, "off"]
         )
 
-    @mock.patch("pcs.lib.external.is_service_installed")
     def test_not_systemctl_failed(self, mock_is_installed, mock_systemctl):
         mock_is_installed.return_value = True
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 1)
+        self.mock_runner.run.return_value = ("", "error", 1)
         self.assertRaises(
             lib.DisableServiceError,
             lambda: lib.disable_service(self.mock_runner, self.service)
@@ -1059,7 +1092,14 @@ class DisableServiceTest(TestCase):
             ["chkconfig", self.service, "off"]
         )
 
-    @mock.patch("pcs.lib.external.is_service_installed")
+    def test_systemctl_not_installed(
+            self, mock_is_installed, mock_systemctl
+    ):
+        mock_is_installed.return_value = False
+        mock_systemctl.return_value = True
+        lib.disable_service(self.mock_runner, self.service)
+        self.assertEqual(self.mock_runner.run.call_count, 0)
+
     def test_not_systemctl_not_installed(
             self, mock_is_installed, mock_systemctl
     ):
@@ -1068,6 +1108,25 @@ class DisableServiceTest(TestCase):
         lib.disable_service(self.mock_runner, self.service)
         self.assertEqual(self.mock_runner.run.call_count, 0)
 
+    def test_instance_systemctl(self, mock_is_installed, mock_systemctl):
+        mock_is_installed.return_value = True
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", "Removed symlink", 0)
+        lib.disable_service(self.mock_runner, self.service, instance="test")
+        self.mock_runner.run.assert_called_once_with([
+            "systemctl",
+            "disable",
+            "{0}@{1}.service".format(self.service, "test")
+        ])
+
+    def test_instance_not_systemctl(self, mock_is_installed, mock_systemctl):
+        mock_is_installed.return_value = True
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", "", 0)
+        lib.disable_service(self.mock_runner, self.service, instance="test")
+        self.mock_runner.run.assert_called_once_with(
+            ["chkconfig", self.service, "off"]
+        )
 
 @mock.patch("pcs.lib.external.is_systemctl")
 class EnableServiceTest(TestCase):
@@ -1077,7 +1136,7 @@ class EnableServiceTest(TestCase):
 
     def test_systemctl(self, mock_systemctl):
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("", "Created symlink", 0)
         lib.enable_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
             ["systemctl", "enable", self.service + ".service"]
@@ -1085,7 +1144,7 @@ class EnableServiceTest(TestCase):
 
     def test_systemctl_failed(self, mock_systemctl):
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("", 1)
+        self.mock_runner.run.return_value = ("", "Failed", 1)
         self.assertRaises(
             lib.EnableServiceError,
             lambda: lib.enable_service(self.mock_runner, self.service)
@@ -1096,7 +1155,7 @@ class EnableServiceTest(TestCase):
 
     def test_not_systemctl(self, mock_systemctl):
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("", "", 0)
         lib.enable_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
             ["chkconfig", self.service, "on"]
@@ -1104,7 +1163,7 @@ class EnableServiceTest(TestCase):
 
     def test_not_systemctl_failed(self, mock_systemctl):
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 1)
+        self.mock_runner.run.return_value = ("", "error", 1)
         self.assertRaises(
             lib.EnableServiceError,
             lambda: lib.enable_service(self.mock_runner, self.service)
@@ -1113,6 +1172,24 @@ class EnableServiceTest(TestCase):
             ["chkconfig", self.service, "on"]
         )
 
+    def test_instance_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", "Created symlink", 0)
+        lib.enable_service(self.mock_runner, self.service, instance="test")
+        self.mock_runner.run.assert_called_once_with([
+            "systemctl",
+            "enable",
+            "{0}@{1}.service".format(self.service, "test")
+        ])
+
+    def test_instance_not_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", "", 0)
+        lib.enable_service(self.mock_runner, self.service, instance="test")
+        self.mock_runner.run.assert_called_once_with(
+            ["chkconfig", self.service, "on"]
+        )
+
 
 @mock.patch("pcs.lib.external.is_systemctl")
 class StartServiceTest(TestCase):
@@ -1122,7 +1199,7 @@ class StartServiceTest(TestCase):
 
     def test_systemctl(self, mock_systemctl):
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("", "", 0)
         lib.start_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
             ["systemctl", "start", self.service + ".service"]
@@ -1130,7 +1207,7 @@ class StartServiceTest(TestCase):
 
     def test_systemctl_failed(self, mock_systemctl):
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("", 1)
+        self.mock_runner.run.return_value = ("", "Failed", 1)
         self.assertRaises(
             lib.StartServiceError,
             lambda: lib.start_service(self.mock_runner, self.service)
@@ -1141,7 +1218,7 @@ class StartServiceTest(TestCase):
 
     def test_not_systemctl(self, mock_systemctl):
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("Starting...", "", 0)
         lib.start_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
             ["service", self.service, "start"]
@@ -1149,7 +1226,7 @@ class StartServiceTest(TestCase):
 
     def test_not_systemctl_failed(self, mock_systemctl):
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 1)
+        self.mock_runner.run.return_value = ("", "unrecognized", 1)
         self.assertRaises(
             lib.StartServiceError,
             lambda: lib.start_service(self.mock_runner, self.service)
@@ -1158,6 +1235,22 @@ class StartServiceTest(TestCase):
             ["service", self.service, "start"]
         )
 
+    def test_instance_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", "", 0)
+        lib.start_service(self.mock_runner, self.service, instance="test")
+        self.mock_runner.run.assert_called_once_with([
+            "systemctl", "start", "{0}@{1}.service".format(self.service, "test")
+        ])
+
+    def test_instance_not_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("Starting...", "", 0)
+        lib.start_service(self.mock_runner, self.service, instance="test")
+        self.mock_runner.run.assert_called_once_with(
+            ["service", self.service, "start"]
+        )
+
 
 @mock.patch("pcs.lib.external.is_systemctl")
 class StopServiceTest(TestCase):
@@ -1167,7 +1260,7 @@ class StopServiceTest(TestCase):
 
     def test_systemctl(self, mock_systemctl):
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("", "", 0)
         lib.stop_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
             ["systemctl", "stop", self.service + ".service"]
@@ -1175,7 +1268,7 @@ class StopServiceTest(TestCase):
 
     def test_systemctl_failed(self, mock_systemctl):
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("", 1)
+        self.mock_runner.run.return_value = ("", "Failed", 1)
         self.assertRaises(
             lib.StopServiceError,
             lambda: lib.stop_service(self.mock_runner, self.service)
@@ -1186,7 +1279,7 @@ class StopServiceTest(TestCase):
 
     def test_not_systemctl(self, mock_systemctl):
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("Stopping...", "", 0)
         lib.stop_service(self.mock_runner, self.service)
         self.mock_runner.run.assert_called_once_with(
             ["service", self.service, "stop"]
@@ -1194,7 +1287,7 @@ class StopServiceTest(TestCase):
 
     def test_not_systemctl_failed(self, mock_systemctl):
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 1)
+        self.mock_runner.run.return_value = ("", "unrecognized", 1)
         self.assertRaises(
             lib.StopServiceError,
             lambda: lib.stop_service(self.mock_runner, self.service)
@@ -1203,6 +1296,22 @@ class StopServiceTest(TestCase):
             ["service", self.service, "stop"]
         )
 
+    def test_instance_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", "", 0)
+        lib.stop_service(self.mock_runner, self.service, instance="test")
+        self.mock_runner.run.assert_called_once_with([
+            "systemctl", "stop", "{0}@{1}.service".format(self.service, "test")
+        ])
+
+    def test_instance_not_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("Stopping...", "", 0)
+        lib.stop_service(self.mock_runner, self.service, instance="test")
+        self.mock_runner.run.assert_called_once_with(
+            ["service", self.service, "stop"]
+        )
+
 
 class KillServicesTest(TestCase):
     def setUp(self):
@@ -1210,14 +1319,14 @@ class KillServicesTest(TestCase):
         self.services = ["service1", "service2"]
 
     def test_success(self):
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("", "", 0)
         lib.kill_services(self.mock_runner, self.services)
         self.mock_runner.run.assert_called_once_with(
             ["killall", "--quiet", "--signal", "9", "--"] + self.services
         )
 
     def test_failed(self):
-        self.mock_runner.run.return_value = ("error", 1)
+        self.mock_runner.run.return_value = ("", "error", 1)
         self.assertRaises(
             lib.KillServicesError,
             lambda: lib.kill_services(self.mock_runner, self.services)
@@ -1227,7 +1336,7 @@ class KillServicesTest(TestCase):
         )
 
     def test_service_not_running(self):
-        self.mock_runner.run.return_value = ("", 1)
+        self.mock_runner.run.return_value = ("", "", 1)
         lib.kill_services(self.mock_runner, self.services)
         self.mock_runner.run.assert_called_once_with(
             ["killall", "--quiet", "--signal", "9", "--"] + self.services
@@ -1271,7 +1380,7 @@ class IsServiceEnabledTest(TestCase):
 
     def test_systemctl_enabled(self, mock_systemctl):
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("enabled\n", 0)
+        self.mock_runner.run.return_value = ("enabled\n", "", 0)
         self.assertTrue(lib.is_service_enabled(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
             ["systemctl", "is-enabled", self.service + ".service"]
@@ -1279,7 +1388,7 @@ class IsServiceEnabledTest(TestCase):
 
     def test_systemctl_disabled(self, mock_systemctl):
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("disabled\n", 2)
+        self.mock_runner.run.return_value = ("disabled\n", "", 2)
         self.assertFalse(lib.is_service_enabled(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
             ["systemctl", "is-enabled", self.service + ".service"]
@@ -1287,7 +1396,7 @@ class IsServiceEnabledTest(TestCase):
 
     def test_not_systemctl_enabled(self, mock_systemctl):
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("", "", 0)
         self.assertTrue(lib.is_service_enabled(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
             ["chkconfig", self.service]
@@ -1295,7 +1404,7 @@ class IsServiceEnabledTest(TestCase):
 
     def test_not_systemctl_disabled(self, mock_systemctl):
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 3)
+        self.mock_runner.run.return_value = ("", "", 3)
         self.assertFalse(lib.is_service_enabled(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
             ["chkconfig", self.service]
@@ -1310,7 +1419,7 @@ class IsServiceRunningTest(TestCase):
 
     def test_systemctl_running(self, mock_systemctl):
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("active", "", 0)
         self.assertTrue(lib.is_service_running(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
             ["systemctl", "is-active", self.service + ".service"]
@@ -1318,7 +1427,7 @@ class IsServiceRunningTest(TestCase):
 
     def test_systemctl_not_running(self, mock_systemctl):
         mock_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("", 2)
+        self.mock_runner.run.return_value = ("inactive", "", 2)
         self.assertFalse(lib.is_service_running(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
             ["systemctl", "is-active", self.service + ".service"]
@@ -1326,7 +1435,7 @@ class IsServiceRunningTest(TestCase):
 
     def test_not_systemctl_running(self, mock_systemctl):
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 0)
+        self.mock_runner.run.return_value = ("is running", "", 0)
         self.assertTrue(lib.is_service_running(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
             ["service", self.service, "status"]
@@ -1334,7 +1443,7 @@ class IsServiceRunningTest(TestCase):
 
     def test_not_systemctl_not_running(self, mock_systemctl):
         mock_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 3)
+        self.mock_runner.run.return_value = ("is stopped", "", 3)
         self.assertFalse(lib.is_service_running(self.mock_runner, self.service))
         self.mock_runner.run.assert_called_once_with(
             ["service", self.service, "status"]
@@ -1407,7 +1516,7 @@ sbd.service                                 enabled
 pacemaker.service                           enabled
 
 3 unit files listed.
-""", 0)
+""", "", 0)
         self.assertEqual(
             lib.get_systemd_services(self.mock_runner),
             ["pcsd", "sbd", "pacemaker"]
@@ -1419,7 +1528,7 @@ pacemaker.service                           enabled
 
     def test_failed(self, mock_is_systemctl):
         mock_is_systemctl.return_value = True
-        self.mock_runner.run.return_value = ("failed", 1)
+        self.mock_runner.run.return_value = ("stdout", "failed", 1)
         self.assertEqual(lib.get_systemd_services(self.mock_runner), [])
         self.assertEqual(mock_is_systemctl.call_count, 1)
         self.mock_runner.run.assert_called_once_with(
@@ -1428,10 +1537,9 @@ pacemaker.service                           enabled
 
     def test_not_systemd(self, mock_is_systemctl):
         mock_is_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("", 0)
         self.assertEqual(lib.get_systemd_services(self.mock_runner), [])
-        self.assertEqual(mock_is_systemctl.call_count, 1)
-        self.assertEqual(self.mock_runner.call_count, 0)
+        mock_is_systemctl.assert_called_once_with()
+        self.mock_runner.assert_not_called()
 
 
 @mock.patch("pcs.lib.external.is_systemctl")
@@ -1445,24 +1553,20 @@ class GetNonSystemdServicesTest(TestCase):
 pcsd           	0:off	1:off	2:on	3:on	4:on	5:on	6:off
 sbd            	0:off	1:on	2:on	3:on	4:on	5:on	6:off
 pacemaker      	0:off	1:off	2:off	3:off	4:off	5:off	6:off
-""", 0)
+""", "", 0)
         self.assertEqual(
             lib.get_non_systemd_services(self.mock_runner),
             ["pcsd", "sbd", "pacemaker"]
         )
         self.assertEqual(mock_is_systemctl.call_count, 1)
-        self.mock_runner.run.assert_called_once_with(
-            ["chkconfig"], ignore_stderr=True
-        )
+        self.mock_runner.run.assert_called_once_with(["chkconfig"])
 
     def test_failed(self, mock_is_systemctl):
         mock_is_systemctl.return_value = False
-        self.mock_runner.run.return_value = ("failed", 1)
+        self.mock_runner.run.return_value = ("stdout", "failed", 1)
         self.assertEqual(lib.get_non_systemd_services(self.mock_runner), [])
         self.assertEqual(mock_is_systemctl.call_count, 1)
-        self.mock_runner.run.assert_called_once_with(
-            ["chkconfig"], ignore_stderr=True
-        )
+        self.mock_runner.run.assert_called_once_with(["chkconfig"])
 
     def test_systemd(self, mock_is_systemctl):
         mock_is_systemctl.return_value = True
@@ -1470,3 +1574,20 @@ pacemaker      	0:off	1:off	2:off	3:off	4:off	5:off	6:off
         self.assertEqual(lib.get_non_systemd_services(self.mock_runner), [])
         self.assertEqual(mock_is_systemctl.call_count, 1)
         self.assertEqual(self.mock_runner.call_count, 0)
+
+ at mock.patch("pcs.lib.external.is_systemctl")
+class EnsureIsSystemctlTest(TestCase):
+    def test_systemd(self, mock_is_systemctl):
+        mock_is_systemctl.return_value = True
+        lib.ensure_is_systemd()
+
+    def test_not_systemd(self, mock_is_systemctl):
+        mock_is_systemctl.return_value = False
+        assert_raise_library_error(
+            lib.ensure_is_systemd,
+            (
+                severity.ERROR,
+                report_codes.UNSUPPORTED_OPERATION_ON_NON_SYSTEMD_SYSTEMS,
+                {}
+            )
+        )
diff --git a/pcs/test/test_lib_node.py b/pcs/test/test_lib_node.py
index 6c841d3..caf128f 100644
--- a/pcs/test/test_lib_node.py
+++ b/pcs/test/test_lib_node.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 import pcs.lib.node as lib
 
diff --git a/pcs/test/test_lib_nodes_task.py b/pcs/test/test_lib_nodes_task.py
index cff88eb..6f05b15 100644
--- a/pcs/test/test_lib_nodes_task.py
+++ b/pcs/test/test_lib_nodes_task.py
@@ -5,14 +5,14 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from pcs.test.tools.assertions import (
     assert_raise_library_error,
     assert_report_item_list_equal,
 )
 from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 
 from pcs.common import report_codes
 from pcs.lib.external import NodeCommunicator, NodeAuthenticationException
diff --git a/pcs/test/test_lib_pacemaker.py b/pcs/test/test_lib_pacemaker.py
index 0edee5c..7ca7b77 100644
--- a/pcs/test/test_lib_pacemaker.py
+++ b/pcs/test/test_lib_pacemaker.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 import os.path
 
 from pcs.test.tools.assertions import (
@@ -13,7 +13,7 @@ from pcs.test.tools.assertions import (
     assert_xml_equal,
 )
 from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.xml import XmlManipulation
 
 from pcs import settings
@@ -64,21 +64,31 @@ class LibraryPacemakerNodeStatusTest(LibraryPacemakerTest):
 
 class GetClusterStatusXmlTest(LibraryPacemakerTest):
     def test_success(self):
-        expected_xml = "<xml />"
+        expected_stdout = "<xml />"
+        expected_stderr = ""
         expected_retval = 0
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_xml, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         real_xml = lib.get_cluster_status_xml(mock_runner)
 
         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
-        self.assertEqual(expected_xml, real_xml)
+        self.assertEqual(expected_stdout, real_xml)
 
     def test_error(self):
-        expected_error = "some error"
+        expected_stdout = "some info"
+        expected_stderr = "some error"
         expected_retval = 1
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_error, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         assert_raise_library_error(
             lambda: lib.get_cluster_status_xml(mock_runner),
@@ -86,8 +96,7 @@ class GetClusterStatusXmlTest(LibraryPacemakerTest):
                 Severity.ERROR,
                 report_codes.CRM_MON_ERROR,
                 {
-                    "return_value": expected_retval,
-                    "stdout": expected_error,
+                    "reason": expected_stderr + "\n" + expected_stdout,
                 }
             )
         )
@@ -96,23 +105,33 @@ class GetClusterStatusXmlTest(LibraryPacemakerTest):
 
 class GetCibXmlTest(LibraryPacemakerTest):
     def test_success(self):
-        expected_xml = "<xml />"
+        expected_stdout = "<xml />"
+        expected_stderr = ""
         expected_retval = 0
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_xml, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         real_xml = lib.get_cib_xml(mock_runner)
 
         mock_runner.run.assert_called_once_with(
             [self.path("cibadmin"), "--local", "--query"]
         )
-        self.assertEqual(expected_xml, real_xml)
+        self.assertEqual(expected_stdout, real_xml)
 
     def test_error(self):
-        expected_error = "some error"
+        expected_stdout = "some info"
+        expected_stderr = "some error"
         expected_retval = 1
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_error, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         assert_raise_library_error(
             lambda: lib.get_cib_xml(mock_runner),
@@ -120,8 +139,7 @@ class GetCibXmlTest(LibraryPacemakerTest):
                 Severity.ERROR,
                 report_codes.CIB_LOAD_ERROR,
                 {
-                    "return_value": expected_retval,
-                    "stdout": expected_error,
+                    "reason": expected_stderr + "\n" + expected_stdout,
                 }
             )
         )
@@ -131,11 +149,16 @@ class GetCibXmlTest(LibraryPacemakerTest):
         )
 
     def test_success_scope(self):
-        expected_xml = "<xml />"
+        expected_stdout = "<xml />"
+        expected_stderr = ""
         expected_retval = 0
         scope = "test_scope"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_xml, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         real_xml = lib.get_cib_xml(mock_runner, scope)
 
@@ -145,14 +168,19 @@ class GetCibXmlTest(LibraryPacemakerTest):
                 "--local", "--query", "--scope={0}".format(scope)
             ]
         )
-        self.assertEqual(expected_xml, real_xml)
+        self.assertEqual(expected_stdout, real_xml)
 
     def test_scope_error(self):
-        expected_error = "some error"
+        expected_stdout = "some info"
+        expected_stderr = "some error"
         expected_retval = 6
         scope = "test_scope"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_error, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         assert_raise_library_error(
             lambda: lib.get_cib_xml(mock_runner, scope=scope),
@@ -161,8 +189,7 @@ class GetCibXmlTest(LibraryPacemakerTest):
                 report_codes.CIB_LOAD_ERROR_SCOPE_MISSING,
                 {
                     "scope": scope,
-                    "return_value": expected_retval,
-                    "stdout": expected_error,
+                    "reason": expected_stderr + "\n" + expected_stdout,
                 }
             )
         )
@@ -194,10 +221,15 @@ class GetCibTest(LibraryPacemakerTest):
 class ReplaceCibConfigurationTest(LibraryPacemakerTest):
     def test_success(self):
         xml = "<xml/>"
-        expected_output = "expected output"
+        expected_stdout = "expected output"
+        expected_stderr = ""
         expected_retval = 0
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_output, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         lib.replace_cib_configuration(
             mock_runner,
@@ -214,10 +246,15 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
 
     def test_cib_upgraded(self):
         xml = "<xml/>"
-        expected_output = "expected output"
+        expected_stdout = "expected output"
+        expected_stderr = ""
         expected_retval = 0
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_output, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         lib.replace_cib_configuration(
             mock_runner, XmlManipulation.from_str(xml).tree, True
@@ -230,10 +267,15 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
 
     def test_error(self):
         xml = "<xml/>"
-        expected_error = "expected error"
+        expected_stdout = "expected output"
+        expected_stderr = "expected stderr"
         expected_retval = 1
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_error, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         assert_raise_library_error(
             lambda: lib.replace_cib_configuration(
@@ -245,8 +287,8 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
                 Severity.ERROR,
                 report_codes.CIB_PUSH_ERROR,
                 {
-                    "return_value": expected_retval,
-                    "stdout": expected_error,
+                    "reason": expected_stderr,
+                    "pushed_cib": expected_stdout,
                 }
             )
         )
@@ -261,10 +303,15 @@ class ReplaceCibConfigurationTest(LibraryPacemakerTest):
 
 class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
     def test_offline(self):
-        expected_error = "some error"
+        expected_stdout = "some info"
+        expected_stderr = "some error"
         expected_retval = 1
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_error, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         self.assertEqual(
             {"offline": True},
@@ -273,10 +320,15 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
 
     def test_invalid_status(self):
-        expected_xml = "some error"
+        expected_stdout = "invalid xml"
+        expected_stderr = ""
         expected_retval = 0
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_xml, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         assert_raise_library_error(
             lambda: lib.get_local_node_status(mock_runner),
@@ -310,9 +362,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
             ),
         ]
         return_value_list = [
-            (str(self.status), 0),
-            (node_id, 0),
-            (node_name, 0)
+            (str(self.status), "", 0),
+            (node_id, "", 0),
+            (node_name, "", 0)
         ]
         mock_runner.run.side_effect = return_value_list
 
@@ -339,9 +391,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
             ),
         ]
         return_value_list = [
-            (str(self.status), 0),
-            (node_id, 0),
-            (node_name_bad, 0)
+            (str(self.status), "", 0),
+            (node_id, "", 0),
+            (node_name_bad, "", 0)
         ]
         mock_runner.run.side_effect = return_value_list
 
@@ -370,8 +422,8 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
             mock.call([self.path("crm_node"), "--cluster-id"]),
         ]
         return_value_list = [
-            (str(self.status), 0),
-            ("some error", 1),
+            (str(self.status), "", 0),
+            ("", "some error", 1),
         ]
         mock_runner.run.side_effect = return_value_list
 
@@ -403,9 +455,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
             ),
         ]
         return_value_list = [
-            (str(self.status), 0),
-            (node_id, 0),
-            ("some error", 1),
+            (str(self.status), "", 0),
+            (node_id, "", 0),
+            ("", "some error", 1),
         ]
         mock_runner.run.side_effect = return_value_list
 
@@ -437,9 +489,9 @@ class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
             ),
         ]
         return_value_list = [
-            (str(self.status), 0),
-            (node_id, 0),
-            ("(null)", 0),
+            (str(self.status), "", 0),
+            (node_id, "", 0),
+            ("(null)", "", 0),
         ]
         mock_runner.run.side_effect = return_value_list
 
@@ -465,15 +517,16 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         return str(XmlManipulation(doc))
 
     def test_basic(self):
-        expected_output = "expected output"
+        expected_stdout = "expected output"
+        expected_stderr = "expected stderr"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
         call_list = [
             mock.call(self.crm_mon_cmd()),
             mock.call([self.path("crm_resource"), "--cleanup"]),
         ]
         return_value_list = [
-            (self.fixture_status_xml(1, 1), 0),
-            (expected_output, 0),
+            (self.fixture_status_xml(1, 1), "", 0),
+            (expected_stdout, expected_stderr, 0),
         ]
         mock_runner.run.side_effect = return_value_list
 
@@ -482,11 +535,18 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         self.assertEqual(len(return_value_list), len(call_list))
         self.assertEqual(len(return_value_list), mock_runner.run.call_count)
         mock_runner.run.assert_has_calls(call_list)
-        self.assertEqual(expected_output, real_output)
+        self.assertEqual(
+            expected_stdout + "\n" + expected_stderr,
+            real_output
+        )
 
     def test_threshold_exceeded(self):
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (self.fixture_status_xml(1000, 1000), 0)
+        mock_runner.run.return_value = (
+            self.fixture_status_xml(1000, 1000),
+            "",
+            0
+        )
 
         assert_raise_library_error(
             lambda: lib.resource_cleanup(mock_runner),
@@ -501,49 +561,62 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
 
     def test_forced(self):
-        expected_output = "expected output"
+        expected_stdout = "expected output"
+        expected_stderr = "expected stderr"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_output, 0)
+        mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
 
         real_output = lib.resource_cleanup(mock_runner, force=True)
 
         mock_runner.run.assert_called_once_with(
             [self.path("crm_resource"), "--cleanup"]
         )
-        self.assertEqual(expected_output, real_output)
+        self.assertEqual(
+            expected_stdout + "\n" + expected_stderr,
+            real_output
+        )
 
     def test_resource(self):
         resource = "test_resource"
-        expected_output = "expected output"
+        expected_stdout = "expected output"
+        expected_stderr = "expected stderr"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_output, 0)
+        mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
 
         real_output = lib.resource_cleanup(mock_runner, resource=resource)
 
         mock_runner.run.assert_called_once_with(
             [self.path("crm_resource"), "--cleanup", "--resource", resource]
         )
-        self.assertEqual(expected_output, real_output)
+        self.assertEqual(
+            expected_stdout + "\n" + expected_stderr,
+            real_output
+        )
 
     def test_node(self):
         node = "test_node"
-        expected_output = "expected output"
+        expected_stdout = "expected output"
+        expected_stderr = "expected stderr"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_output, 0)
+        mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
 
         real_output = lib.resource_cleanup(mock_runner, node=node)
 
         mock_runner.run.assert_called_once_with(
             [self.path("crm_resource"), "--cleanup", "--node", node]
         )
-        self.assertEqual(expected_output, real_output)
+        self.assertEqual(
+            expected_stdout + "\n" + expected_stderr,
+            real_output
+        )
 
     def test_node_and_resource(self):
         node = "test_node"
         resource = "test_resource"
-        expected_output = "expected output"
+        expected_stdout = "expected output"
+        expected_stderr = "expected stderr"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_output, 0)
+        mock_runner.run.return_value = (expected_stdout, expected_stderr, 0)
 
         real_output = lib.resource_cleanup(
             mock_runner, resource=resource, node=node
@@ -555,13 +628,21 @@ class ResourceCleanupTest(LibraryPacemakerTest):
                 "--cleanup", "--resource", resource, "--node", node
             ]
         )
-        self.assertEqual(expected_output, real_output)
+        self.assertEqual(
+            expected_stdout + "\n" + expected_stderr,
+            real_output
+        )
 
     def test_error_state(self):
-        expected_error = "some error"
+        expected_stdout = "some info"
+        expected_stderr = "some error"
         expected_retval = 1
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_error, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         assert_raise_library_error(
             lambda: lib.resource_cleanup(mock_runner),
@@ -569,8 +650,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
                 Severity.ERROR,
                 report_codes.CRM_MON_ERROR,
                 {
-                    "return_value": expected_retval,
-                    "stdout": expected_error,
+                    "reason": expected_stderr + "\n" + expected_stdout,
                 }
             )
         )
@@ -578,7 +658,8 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
 
     def test_error_cleanup(self):
-        expected_error = "expected error"
+        expected_stdout = "some info"
+        expected_stderr = "some error"
         expected_retval = 1
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
         call_list = [
@@ -586,8 +667,8 @@ class ResourceCleanupTest(LibraryPacemakerTest):
             mock.call([self.path("crm_resource"), "--cleanup"]),
         ]
         return_value_list = [
-            (self.fixture_status_xml(1, 1), 0),
-            (expected_error, expected_retval),
+            (self.fixture_status_xml(1, 1), "", 0),
+            (expected_stdout, expected_stderr, expected_retval),
         ]
         mock_runner.run.side_effect = return_value_list
 
@@ -597,8 +678,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
                 Severity.ERROR,
                 report_codes.RESOURCE_CLEANUP_ERROR,
                 {
-                    "return_value": expected_retval,
-                    "stdout": expected_error,
+                    "reason": expected_stderr + "\n" + expected_stdout,
                 }
             )
         )
@@ -609,10 +689,33 @@ class ResourceCleanupTest(LibraryPacemakerTest):
 
 class ResourcesWaitingTest(LibraryPacemakerTest):
     def test_has_support(self):
-        expected_output = "something --wait something else"
+        expected_stdout = ""
+        expected_stderr = "something --wait something else"
+        expected_retval = 1
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
+
+        self.assertTrue(
+            lib.has_resource_wait_support(mock_runner)
+        )
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_resource"), "-?"]
+        )
+
+    def test_has_support_stdout(self):
+        expected_stdout = "something --wait something else"
+        expected_stderr = ""
         expected_retval = 1
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_output, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         self.assertTrue(
             lib.has_resource_wait_support(mock_runner)
@@ -622,10 +725,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
         )
 
     def test_doesnt_have_support(self):
-        expected_output = "something something else"
+        expected_stdout = "something something else"
+        expected_stderr = "something something else"
         expected_retval = 1
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_output, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         self.assertFalse(
             lib.has_resource_wait_support(mock_runner)
@@ -652,10 +760,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
         )
 
     def test_wait_success(self):
-        expected_output = "expected output"
+        expected_stdout = "expected output"
+        expected_stderr = "expected stderr"
         expected_retval = 0
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_output, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         self.assertEqual(None, lib.wait_for_resources(mock_runner))
 
@@ -664,11 +777,16 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
         )
 
     def test_wait_timeout_success(self):
-        expected_output = "expected output"
+        expected_stdout = "expected output"
+        expected_stderr = "expected stderr"
         expected_retval = 0
         timeout = 10
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_output, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         self.assertEqual(None, lib.wait_for_resources(mock_runner, timeout))
 
@@ -680,10 +798,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
         )
 
     def test_wait_error(self):
-        expected_error = "some error"
+        expected_stdout = "some info"
+        expected_stderr = "some error"
         expected_retval = 1
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_error, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         assert_raise_library_error(
             lambda: lib.wait_for_resources(mock_runner),
@@ -691,8 +814,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
                 Severity.ERROR,
                 report_codes.RESOURCE_WAIT_ERROR,
                 {
-                    "return_value": expected_retval,
-                    "stdout": expected_error,
+                    "reason": expected_stderr + "\n" + expected_stdout,
                 }
             )
         )
@@ -702,10 +824,15 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
         )
 
     def test_wait_error_timeout(self):
-        expected_error = "some error"
+        expected_stdout = "some info"
+        expected_stderr = "some error"
         expected_retval = 62
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_error, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         assert_raise_library_error(
             lambda: lib.wait_for_resources(mock_runner),
@@ -713,8 +840,7 @@ class ResourcesWaitingTest(LibraryPacemakerTest):
                 Severity.ERROR,
                 report_codes.RESOURCE_WAIT_TIMED_OUT,
                 {
-                    "return_value": expected_retval,
-                    "stdout": expected_error,
+                    "reason": expected_stderr + "\n" + expected_stdout,
                 }
             )
         )
@@ -727,7 +853,7 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
     def test_standby_local(self):
         expected_retval = 0
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("dummy", expected_retval)
+        mock_runner.run.return_value = ("dummy", "", expected_retval)
 
         output = lib.nodes_standby(mock_runner)
 
@@ -739,7 +865,7 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
     def test_unstandby_local(self):
         expected_retval = 0
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("dummy", expected_retval)
+        mock_runner.run.return_value = ("dummy", "", expected_retval)
 
         output = lib.nodes_unstandby(mock_runner)
 
@@ -760,8 +886,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
             mock.call([self.path("crm_standby"), "-v", "on", "-N", n])
             for n in nodes
         ]
-        return_value_list = [(str(self.status), 0)]
-        return_value_list += [("dummy", 0) for n in nodes]
+        return_value_list = [(str(self.status), "", 0)]
+        return_value_list += [("dummy", "", 0) for n in nodes]
         mock_runner.run.side_effect = return_value_list
 
         output = lib.nodes_standby(mock_runner, all_nodes=True)
@@ -783,8 +909,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
             mock.call([self.path("crm_standby"), "-D", "-N", n])
             for n in nodes
         ]
-        return_value_list = [(str(self.status), 0)]
-        return_value_list += [("dummy", 0) for n in nodes]
+        return_value_list = [(str(self.status), "", 0)]
+        return_value_list += [("dummy", "", 0) for n in nodes]
         mock_runner.run.side_effect = return_value_list
 
         output = lib.nodes_unstandby(mock_runner, all_nodes=True)
@@ -806,8 +932,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
             mock.call([self.path("crm_standby"), "-v", "on", "-N", n])
             for n in nodes[1:]
         ]
-        return_value_list = [(str(self.status), 0)]
-        return_value_list += [("dummy", 0) for n in nodes[1:]]
+        return_value_list = [(str(self.status), "", 0)]
+        return_value_list += [("dummy", "", 0) for n in nodes[1:]]
         mock_runner.run.side_effect = return_value_list
 
         output = lib.nodes_standby(mock_runner, node_list=nodes[1:])
@@ -829,8 +955,8 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
             mock.call([self.path("crm_standby"), "-D", "-N", n])
             for n in nodes[:2]
         ]
-        return_value_list = [(str(self.status), 0)]
-        return_value_list += [("dummy", 0) for n in nodes[:2]]
+        return_value_list = [(str(self.status), "", 0)]
+        return_value_list += [("dummy", "", 0) for n in nodes[:2]]
         mock_runner.run.side_effect = return_value_list
 
         output = lib.nodes_unstandby(mock_runner, node_list=nodes[:2])
@@ -845,7 +971,7 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
             self.fixture_get_node_status("node_1", "id_1")
         )
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (str(self.status), 0)
+        mock_runner.run.return_value = (str(self.status), "", 0)
 
         assert_raise_library_error(
             lambda: lib.nodes_standby(mock_runner, ["node_2"]),
@@ -863,7 +989,7 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
             self.fixture_get_node_status("node_1", "id_1")
         )
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (str(self.status), 0)
+        mock_runner.run.return_value = (str(self.status), "", 0)
 
         assert_raise_library_error(
             lambda: lib.nodes_unstandby(mock_runner, ["node_2", "node_3"]),
@@ -882,17 +1008,24 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
 
     def test_error_one_node(self):
-        expected_error = "some error"
+        expected_stdout = "some info"
+        expected_stderr = "some error"
         expected_retval = 1
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (expected_error, expected_retval)
+        mock_runner.run.return_value = (
+            expected_stdout,
+            expected_stderr,
+            expected_retval
+        )
 
         assert_raise_library_error(
             lambda: lib.nodes_unstandby(mock_runner),
             (
                 Severity.ERROR,
                 report_codes.COMMON_ERROR,
-                {}
+                {
+                    "text": expected_stderr + "\n" + expected_stdout,
+                }
             )
         )
 
@@ -913,11 +1046,11 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
             for n in nodes
         ]
         return_value_list = [
-            (str(self.status), 0),
-            ("dummy1", 0),
-            ("dummy2", 1),
-            ("dummy3", 0),
-            ("dummy4", 1),
+            (str(self.status), "", 0),
+            ("dummy1", "", 0),
+            ("dummy2", "error2", 1),
+            ("dummy3", "", 0),
+            ("dummy4", "error4", 1),
         ]
         mock_runner.run.side_effect = return_value_list
 
@@ -926,12 +1059,16 @@ class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
             (
                 Severity.ERROR,
                 report_codes.COMMON_ERROR,
-                {}
+                {
+                    "text": "error2\ndummy2",
+                }
             ),
             (
                 Severity.ERROR,
                 report_codes.COMMON_ERROR,
-                {}
+                {
+                    "text": "error4\ndummy4",
+                }
             )
         )
 
diff --git a/pcs/test/test_lib_pacemaker_state.py b/pcs/test/test_lib_pacemaker_state.py
index 54f536d..13f6eb0 100644
--- a/pcs/test/test_lib_pacemaker_state.py
+++ b/pcs/test/test_lib_pacemaker_state.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 from lxml import etree
 
 from pcs.test.tools.assertions import assert_raise_library_error
diff --git a/pcs/test/test_lib_resource_agent.py b/pcs/test/test_lib_resource_agent.py
index 5704184..a569e66 100644
--- a/pcs/test/test_lib_resource_agent.py
+++ b/pcs/test/test_lib_resource_agent.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 import os.path
 
 from lxml import etree
@@ -14,7 +14,7 @@ from pcs.test.tools.assertions import (
     ExtendedAssertionsMixin,
     assert_xml_equal,
 )
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.xml import XmlManipulation as XmlMan
 
 
@@ -199,7 +199,7 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
     def test_execution_failed(self, mock_is_runnable):
         mock_is_runnable.return_value = True
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("error", 1)
+        mock_runner.run.return_value = ("", "error", 1)
         agent_name = "fence_ipmi"
 
         self.assert_raises(
@@ -210,13 +210,13 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
 
         script_path = os.path.join(settings.fence_agent_binaries, agent_name)
         mock_runner.run.assert_called_once_with(
-            [script_path, "-o", "metadata"], ignore_stderr=True
+            [script_path, "-o", "metadata"]
         )
 
     @mock.patch("pcs.lib.resource_agent.is_path_runnable")
     def test_invalid_xml(self, mock_is_runnable):
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("not xml", 0)
+        mock_runner.run.return_value = ("not xml", "", 0)
         mock_is_runnable.return_value = True
         agent_name = "fence_ipmi"
         self.assert_raises(
@@ -227,7 +227,7 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
 
         script_path = os.path.join(settings.fence_agent_binaries, agent_name)
         mock_runner.run.assert_called_once_with(
-            [script_path, "-o", "metadata"], ignore_stderr=True
+            [script_path, "-o", "metadata"]
         )
 
     @mock.patch("pcs.lib.resource_agent.is_path_runnable")
@@ -235,14 +235,14 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
         agent_name = "fence_ipmi"
         xml = "<xml />"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (xml, 0)
+        mock_runner.run.return_value = (xml, "", 0)
         mock_is_runnable.return_value = True
 
         out_dom = lib_ra.get_fence_agent_metadata(mock_runner, agent_name)
 
         script_path = os.path.join(settings.fence_agent_binaries, agent_name)
         mock_runner.run.assert_called_once_with(
-            [script_path, "-o", "metadata"], ignore_stderr=True
+            [script_path, "-o", "metadata"]
         )
         assert_xml_equal(xml, str(XmlMan(out_dom)))
 
@@ -304,7 +304,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
         provider = "provider"
         agent = "agent"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("error", 1)
+        mock_runner.run.return_value = ("", "error", 1)
         mock_is_runnable.return_value = True
 
         self.assert_raises(
@@ -318,8 +318,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
         script_path = os.path.join(settings.ocf_resources, provider, agent)
         mock_runner.run.assert_called_once_with(
             [script_path, "meta-data"],
-            env_extend={"OCF_ROOT": settings.ocf_root},
-            ignore_stderr=True
+            env_extend={"OCF_ROOT": settings.ocf_root}
         )
 
     @mock.patch("pcs.lib.resource_agent.is_path_runnable")
@@ -327,7 +326,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
         provider = "provider"
         agent = "agent"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("not xml", 0)
+        mock_runner.run.return_value = ("not xml", "", 0)
         mock_is_runnable.return_value = True
 
         self.assert_raises(
@@ -341,8 +340,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
         script_path = os.path.join(settings.ocf_resources, provider, agent)
         mock_runner.run.assert_called_once_with(
             [script_path, "meta-data"],
-            env_extend={"OCF_ROOT": settings.ocf_root},
-            ignore_stderr=True
+            env_extend={"OCF_ROOT": settings.ocf_root}
         )
 
     @mock.patch("pcs.lib.resource_agent.is_path_runnable")
@@ -351,7 +349,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
         agent = "agent"
         xml = "<xml />"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (xml, 0)
+        mock_runner.run.return_value = (xml, "", 0)
         mock_is_runnable.return_value = True
 
         out_dom = lib_ra._get_ocf_resource_agent_metadata(
@@ -361,8 +359,7 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
         script_path = os.path.join(settings.ocf_resources, provider, agent)
         mock_runner.run.assert_called_once_with(
             [script_path, "meta-data"],
-            env_extend={"OCF_ROOT": settings.ocf_root},
-            ignore_stderr=True
+            env_extend={"OCF_ROOT": settings.ocf_root}
         )
         assert_xml_equal(xml, str(XmlMan(out_dom)))
 
@@ -596,7 +593,7 @@ class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest):
             </resource-agent>
         """
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = (xml, 0)
+        mock_runner.run.return_value = (xml, "", 0)
         self.assertEqual(
             [
                 {
@@ -623,12 +620,12 @@ class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest):
             lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner)
         )
         mock_runner.run.assert_called_once_with(
-            [settings.stonithd_binary, "metadata"], ignore_stderr=True
+            [settings.stonithd_binary, "metadata"]
         )
 
     def test_failed_to_get_xml(self):
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("", 1)
+        mock_runner.run.return_value = ("", "some error", 1)
         self.assert_raises(
             lib_ra.UnableToGetAgentMetadata,
             lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner),
@@ -636,19 +633,19 @@ class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest):
         )
 
         mock_runner.run.assert_called_once_with(
-            [settings.stonithd_binary, "metadata"], ignore_stderr=True
+            [settings.stonithd_binary, "metadata"]
         )
 
     def test_invalid_xml(self):
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("invalid XML", 0)
+        mock_runner.run.return_value = ("invalid XML", "", 0)
         self.assertRaises(
             lib_ra.InvalidMetadataFormat,
             lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner)
         )
 
         mock_runner.run.assert_called_once_with(
-            [settings.stonithd_binary, "metadata"], ignore_stderr=True
+            [settings.stonithd_binary, "metadata"]
         )
 
 
diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
index e3c1401..9b7b801 100644
--- a/pcs/test/test_lib_sbd.py
+++ b/pcs/test/test_lib_sbd.py
@@ -6,9 +6,9 @@ from __future__ import (
 )
 
 import json
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
-from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.pcs_unittest import mock
 from pcs.test.tools.assertions import (
     assert_raise_library_error,
     assert_report_item_list_equal,
@@ -28,6 +28,7 @@ from pcs.lib.external import (
     NodeConnectionException,
 )
 import pcs.lib.sbd as lib_sbd
+from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
 
 
 class TestException(Exception):
@@ -85,6 +86,111 @@ class RunParallelAndRaiseLibErrorOnFailureTest(TestCase):
         )
 
 
+class EvenNumberOfNodesAndNoQdevice(TestCase):
+    def setUp(self):
+        self.mock_corosync_conf = mock.MagicMock(spec_set=CorosyncConfigFacade)
+
+    def _set_ret_vals(self, nodes, qdevice):
+        self.mock_corosync_conf.get_nodes.return_value = nodes
+        self.mock_corosync_conf.has_quorum_device.return_value = qdevice
+
+    def test_even_num_no_qdevice(self):
+        self._set_ret_vals([1, 2], False)
+        self.assertTrue(lib_sbd._even_number_of_nodes_and_no_qdevice(
+            self.mock_corosync_conf
+        ))
+
+    def test_even_num_qdevice(self):
+        self._set_ret_vals([1, 2], True)
+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
+            self.mock_corosync_conf
+        ))
+
+    def test_odd_num_no_qdevice(self):
+        self._set_ret_vals([1, 2, 3], False)
+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
+            self.mock_corosync_conf
+        ))
+
+    def test_odd_num_qdevice(self):
+        self._set_ret_vals([1, 2, 3], True)
+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
+            self.mock_corosync_conf
+        ))
+
+    def test_even_num_no_qdevice_plus_one(self):
+        self._set_ret_vals([1, 2], False)
+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
+            self.mock_corosync_conf, 1
+        ))
+
+    def test_even_num_qdevice_plus_one(self):
+        self._set_ret_vals([1, 2], True)
+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
+            self.mock_corosync_conf, 1
+        ))
+
+    def test_odd_num_no_qdevice_plus_one(self):
+        self._set_ret_vals([1, 2, 3], False)
+        self.assertTrue(lib_sbd._even_number_of_nodes_and_no_qdevice(
+            self.mock_corosync_conf, 1
+        ))
+
+    def test_odd_num_qdevice_plus_one(self):
+        self._set_ret_vals([1, 2, 3], True)
+        self.assertFalse(lib_sbd._even_number_of_nodes_and_no_qdevice(
+            self.mock_corosync_conf, 1
+        ))
+
+
+@mock.patch("pcs.lib.sbd.is_auto_tie_breaker_needed")
+class AtbHasToBeEnabledTest(TestCase):
+    def setUp(self):
+        self.mock_runner = "runner"
+        self.mock_conf = mock.MagicMock(spec_set=CorosyncConfigFacade)
+
+    def test_atb_needed_is_enabled(self, mock_is_needed):
+        mock_is_needed.return_value = True
+        self.mock_conf.is_enabled_auto_tie_breaker.return_value = True
+        self.assertFalse(lib_sbd.atb_has_to_be_enabled(
+            self.mock_runner, self.mock_conf, 1
+        ))
+        self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with()
+        mock_is_needed.assert_not_called()
+
+    def test_atb_needed_is_disabled(self, mock_is_needed):
+        mock_is_needed.return_value = True
+        self.mock_conf.is_enabled_auto_tie_breaker.return_value = False
+        self.assertTrue(lib_sbd.atb_has_to_be_enabled(
+            self.mock_runner, self.mock_conf, -1
+        ))
+        self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with()
+        mock_is_needed.assert_called_once_with(
+            self.mock_runner, self.mock_conf, -1
+        )
+
+    def test_atb_not_needed_is_enabled(self, mock_is_needed):
+        mock_is_needed.return_value = False
+        self.mock_conf.is_enabled_auto_tie_breaker.return_value = True
+        self.assertFalse(lib_sbd.atb_has_to_be_enabled(
+            self.mock_runner, self.mock_conf, 2
+        ))
+        self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with()
+        mock_is_needed.assert_not_called()
+
+    def test_atb_not_needed_is_disabled(self, mock_is_needed):
+        mock_is_needed.return_value = False
+        self.mock_conf.is_enabled_auto_tie_breaker.return_value = False
+        self.assertFalse(lib_sbd.atb_has_to_be_enabled(
+            self.mock_runner, self.mock_conf, -2
+        ))
+        self.mock_conf.is_enabled_auto_tie_breaker.assert_called_once_with()
+        mock_is_needed.assert_called_once_with(
+            self.mock_runner, self.mock_conf, -2
+        )
+
+
+
 class CheckSbdTest(TestCase):
     def test_success(self):
         mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
@@ -316,11 +422,11 @@ class SetSbdConfigOnNodeTest(TestCase):
         }
         cfg_out = """# This file has been generated by pcs.
 SBD_OPTS="-n node1"
-SBD_WATCHDOG_DEV=/dev/watchdog
+SBD_WATCHDOG_DEV=/my/watchdog
 SBD_WATCHDOG_TIMEOUT=0
 """
         lib_sbd.set_sbd_config_on_node(
-            self.mock_rep, self.mock_com, self.node, cfg_in
+            self.mock_rep, self.mock_com, self.node, cfg_in, "/my/watchdog"
         )
         mock_set_sbd_cfg.assert_called_once_with(
             self.mock_com, self.node, cfg_out
@@ -340,17 +446,24 @@ class SetSbdConfigOnAllNodesTest(TestCase):
     def test_success(self, mock_func):
         mock_com = mock.MagicMock(spec_set=NodeCommunicator)
         mock_rep = MockLibraryReportProcessor()
-        node_list = [NodeAddresses("node" + str(i)) for i in range(5)]
+        watchdog_dict = dict([
+            (NodeAddresses("node" + str(i)), "/dev/watchdog" + str(i))
+            for i in range(5)
+        ])
+        node_list = list(watchdog_dict.keys())
         config = {
             "opt1": "val1",
             "opt2": "val2"
         }
         lib_sbd.set_sbd_config_on_all_nodes(
-            mock_rep, mock_com, node_list, config
+            mock_rep, mock_com, node_list, config, watchdog_dict
         )
         mock_func.assert_called_once_with(
             lib_sbd.set_sbd_config_on_node,
-            [([mock_rep, mock_com, node, config], {}) for node in node_list]
+            [
+                ([mock_rep, mock_com, node, config, watchdog_dict[node]], {})
+                for node in node_list
+            ]
         )
 
 
@@ -594,3 +707,17 @@ class IsSbdEnabledTest(TestCase):
         mock_obj = mock.MagicMock()
         mock_is_service_enabled.return_value = True
         self.assertTrue(lib_sbd.is_sbd_enabled(mock_obj))
+
+
+ at mock.patch("pcs.lib.external.is_service_installed")
+class IsSbdInstalledTest(TestCase):
+    def test_installed(self, mock_is_service_installed):
+        mock_obj = mock.MagicMock()
+        mock_is_service_installed.return_value = True
+        self.assertTrue(lib_sbd.is_sbd_installed(mock_obj))
+
+    def test_not_installed(self, mock_is_service_installed):
+        mock_obj = mock.MagicMock()
+        mock_is_service_installed.return_value = False
+        self.assertFalse(lib_sbd.is_sbd_installed(mock_obj))
+
diff --git a/pcs/test/test_lib_tools.py b/pcs/test/test_lib_tools.py
index 5141ca9..606cb05 100644
--- a/pcs/test/test_lib_tools.py
+++ b/pcs/test/test_lib_tools.py
@@ -5,7 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from pcs.lib import tools
 
diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py
index 023148c..137c7c7 100644
--- a/pcs/test/test_node.py
+++ b/pcs/test/test_node.py
@@ -6,13 +6,21 @@ from __future__ import (
 )
 
 import shutil
-import unittest
+from pcs.test.tools import pcs_unittest as unittest
+from pcs.test.tools.pcs_unittest import mock
 
+from pcs import node
+from pcs.test.tools.assertions import AssertPcsMixin
 from pcs.test.tools.misc import (
     ac,
     get_test_resource as rc,
 )
-from pcs.test.tools.pcs_runner import pcs
+from pcs.test.tools.pcs_runner import (
+    pcs,
+    PcsRunner,
+)
+
+from pcs import utils
 
 empty_cib = rc("cib-empty-withnodes.xml")
 temp_cib = rc("temp-cib.xml")
@@ -82,11 +90,14 @@ Node Attributes:
 """
         ac(expected_out, output)
 
-        output, returnVal = pcs(temp_cib, "node maintenance nonexistant-node")
+        output, returnVal = pcs(
+            temp_cib, "node maintenance nonexistant-node and-another"
+        )
         self.assertEqual(returnVal, 1)
         self.assertEqual(
             output,
             "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+            "Error: Node 'and-another' does not appear to exist in configuration\n"
         )
         output, _ = pcs(temp_cib, "property")
         expected_out = """\
@@ -128,11 +139,14 @@ Cluster Properties:
 """
         ac(expected_out, output)
 
-        output, returnVal = pcs(temp_cib, "node unmaintenance nonexistant-node")
+        output, returnVal = pcs(
+            temp_cib, "node unmaintenance nonexistant-node and-another"
+        )
         self.assertEqual(returnVal, 1)
         self.assertEqual(
             output,
             "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+            "Error: Node 'and-another' does not appear to exist in configuration\n"
         )
         output, _ = pcs(temp_cib, "property")
         expected_out = """\
@@ -182,7 +196,7 @@ Cluster Properties:
         output, returnVal = pcs(temp_cib, "node utilization rh7-2")
         expected_out = """\
 Node Utilization:
- rh7-2: \n"""
+"""
         ac(expected_out, output)
         self.assertEqual(0, returnVal)
 
@@ -229,10 +243,43 @@ Node Utilization:
         ac(expected_out, output)
         self.assertEqual(0, returnVal)
 
+        output, returnVal = pcs(
+            temp_cib, "node utilization rh7-2 test1=-20"
+        )
+        ac("", output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "node utilization --name test1")
+        expected_out = """\
+Node Utilization:
+ rh7-1: test1=-10
+ rh7-2: test1=-20
+"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "node utilization --name test1 rh7-2"
+        )
+        expected_out = """\
+Node Utilization:
+ rh7-2: test1=-20
+"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
     def test_node_utilization_set_invalid(self):
-        output, returnVal = pcs(temp_cib, "node utilization rh7-0")
+        output, returnVal = pcs(temp_cib, "node utilization rh7-1 test")
         expected_out = """\
-Error: Unable to find a node: rh7-0
+Error: missing value of 'test' option
+"""
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(temp_cib, "node utilization rh7-1 =10")
+        expected_out = """\
+Error: missing key in '=10' option
 """
         ac(expected_out, output)
         self.assertEqual(1, returnVal)
@@ -252,3 +299,284 @@ Error: Value of utilization attribute must be integer: 'test=int'
 """
         ac(expected_out, output)
         self.assertEqual(1, returnVal)
+
+
+class NodeAttributeTest(unittest.TestCase, AssertPcsMixin):
+    def setUp(self):
+        shutil.copy(empty_cib, temp_cib)
+        self.pcs_runner = PcsRunner(temp_cib)
+
+    def fixture_attrs(self, nodes, attrs=None):
+        attrs = dict() if attrs is None else attrs
+        xml_lines = ['<nodes>']
+        for node_id, node_name in enumerate(nodes, 1):
+            xml_lines.extend([
+                '<node id="{0}" uname="{1}">'.format(node_id, node_name),
+                '<instance_attributes id="nodes-{0}">'.format(node_id),
+            ])
+            nv = '<nvpair id="nodes-{id}-{name}" name="{name}" value="{val}"/>'
+            for name, value in attrs.get(node_name, dict()).items():
+                xml_lines.append(nv.format(id=node_id, name=name, val=value))
+            xml_lines.extend([
+                '</instance_attributes>',
+                '</node>'
+            ])
+        xml_lines.append('</nodes>')
+
+        utils.usefile = True
+        utils.filename = temp_cib
+        output, retval = utils.run([
+            "cibadmin", "--modify", '--xml-text', "\n".join(xml_lines)
+        ])
+        assert output == ""
+        assert retval == 0
+
+    def test_show_empty(self):
+        self.fixture_attrs(["rh7-1", "rh7-2"])
+        self.assert_pcs_success(
+            "node attribute",
+            "Node Attributes:\n"
+        )
+
+    def test_show_nonempty(self):
+        self.fixture_attrs(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_success(
+            "node attribute",
+            """\
+Node Attributes:
+ rh7-1: IP=192.168.1.1
+ rh7-2: IP=192.168.1.2
+"""
+        )
+
+    def test_show_multiple_per_node(self):
+        self.fixture_attrs(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
+            }
+        )
+        self.assert_pcs_success(
+            "node attribute",
+            """\
+Node Attributes:
+ rh7-1: IP=192.168.1.1 alias=node1
+ rh7-2: IP=192.168.1.2 alias=node2
+"""
+        )
+
+    def test_show_one_node(self):
+        self.fixture_attrs(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
+            }
+        )
+        self.assert_pcs_success(
+            "node attribute rh7-1",
+            """\
+Node Attributes:
+ rh7-1: IP=192.168.1.1 alias=node1
+"""
+        )
+
+    def test_show_missing_node(self):
+        self.fixture_attrs(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
+            }
+        )
+        self.assert_pcs_success(
+            "node attribute rh7-3",
+            """\
+Node Attributes:
+"""
+        )
+
+    def test_show_name(self):
+        self.fixture_attrs(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
+            }
+        )
+        self.assert_pcs_success(
+            "node attribute --name alias",
+            """\
+Node Attributes:
+ rh7-1: alias=node1
+ rh7-2: alias=node2
+"""
+        )
+
+    def test_show_missing_name(self):
+        self.fixture_attrs(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
+            }
+        )
+        self.assert_pcs_success(
+            "node attribute --name missing",
+            """\
+Node Attributes:
+"""
+        )
+
+    def test_show_node_and_name(self):
+        self.fixture_attrs(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
+            }
+        )
+        self.assert_pcs_success(
+            "node attribute --name alias rh7-1",
+            """\
+Node Attributes:
+ rh7-1: alias=node1
+"""
+        )
+
+    def test_set_new(self):
+        self.fixture_attrs(["rh7-1", "rh7-2"])
+        self.assert_pcs_success(
+            "node attribute rh7-1 IP=192.168.1.1"
+        )
+        self.assert_pcs_success(
+            "node attribute",
+            """\
+Node Attributes:
+ rh7-1: IP=192.168.1.1
+"""
+        )
+        self.assert_pcs_success(
+            "node attribute rh7-2 IP=192.168.1.2"
+        )
+        self.assert_pcs_success(
+            "node attribute",
+            """\
+Node Attributes:
+ rh7-1: IP=192.168.1.1
+ rh7-2: IP=192.168.1.2
+"""
+        )
+
+    def test_set_existing(self):
+        self.fixture_attrs(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_success(
+            "node attribute rh7-2 IP=192.168.2.2"
+        )
+        self.assert_pcs_success(
+            "node attribute",
+            """\
+Node Attributes:
+ rh7-1: IP=192.168.1.1
+ rh7-2: IP=192.168.2.2
+"""
+        )
+
+    def test_unset(self):
+        self.fixture_attrs(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_success(
+            "node attribute rh7-2 IP="
+        )
+        self.assert_pcs_success(
+            "node attribute",
+            """\
+Node Attributes:
+ rh7-1: IP=192.168.1.1
+"""
+        )
+
+    def test_unset_nonexisting(self):
+        self.fixture_attrs(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_result(
+            "node attribute rh7-1 missing=",
+            "Error: attribute: 'missing' doesn't exist for node: 'rh7-1'\n",
+            returncode=2
+        )
+
+    def test_unset_nonexisting_forced(self):
+        self.fixture_attrs(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_success(
+            "node attribute rh7-1 missing= --force",
+            ""
+        )
+
+class SetNodeUtilizationTest(unittest.TestCase, AssertPcsMixin):
+    def setUp(self):
+        shutil.copy(empty_cib, temp_cib)
+        self.pcs_runner = PcsRunner(temp_cib)
+
+    def test_refuse_non_option_attribute_parameter_among_options(self):
+        self.assert_pcs_fail("node utilization rh7-1 net", [
+            "Error: missing value of 'net' option",
+        ])
+
+    def test_refuse_option_without_key(self):
+        self.assert_pcs_fail("node utilization rh7-1 =1", [
+            "Error: missing key in '=1' option",
+        ])
+
+class PrintNodeUtilizationTest(unittest.TestCase, AssertPcsMixin):
+    def setUp(self):
+        shutil.copy(empty_cib, temp_cib)
+        self.pcs_runner = PcsRunner(temp_cib)
+
+    @mock.patch("pcs.node.utils")
+    def test_refuse_when_node_not_in_cib_and_is_not_remote(self, mock_utils):
+        mock_cib = mock.MagicMock()
+        mock_cib.getElementsByTagName = mock.Mock(return_value=[])
+
+        mock_utils.get_cib_dom = mock.Mock(return_value=mock_cib)
+        mock_utils.usefile = False
+        mock_utils.getNodeAttributesFromPacemaker = mock.Mock(return_value=[])
+        mock_utils.err = mock.Mock(side_effect=SystemExit)
+
+        self.assertRaises(
+            SystemExit,
+            lambda: node.print_node_utilization("some")
+        )
+
+    def test_refuse_when_node_not_in_mocked_cib(self):
+        self.assert_pcs_fail("node utilization some_nonexistent_node", [
+            "Error: Unable to find a node: some_nonexistent_node",
+        ])
diff --git a/pcs/test/test_properties.py b/pcs/test/test_properties.py
index 6cdd2e5..9634cca 100644
--- a/pcs/test/test_properties.py
+++ b/pcs/test/test_properties.py
@@ -6,13 +6,17 @@ from __future__ import (
 )
 
 import shutil
-import unittest
+from pcs.test.tools import pcs_unittest as unittest
 
+from pcs.test.tools.assertions import AssertPcsMixin
 from pcs.test.tools.misc import (
     ac,
     get_test_resource as rc,
 )
-from pcs.test.tools.pcs_runner import pcs
+from pcs.test.tools.pcs_runner import (
+    pcs,
+    PcsRunner,
+)
 
 from pcs import utils
 
@@ -66,61 +70,6 @@ class PropertyTest(unittest.TestCase):
         assert "stonith-enabled: false" in output
         assert output.startswith('Cluster Properties:\n batch-limit')
 
-    def testNodeProperties(self):
-        utils.usefile = True
-        utils.filename = temp_cib
-        o,r = utils.run(["cibadmin","-M", '--xml-text', '<nodes><node id="1" uname="rh7-1"><instance_attributes id="nodes-1"/></node><node id="2" uname="rh7-2"><instance_attributes id="nodes-2"/></node></nodes>'])
-        ac(o,"")
-        assert r == 0
-
-        o,r = pcs("property set --node=rh7-1 IP=192.168.1.1")
-        ac(o,"")
-        assert r==0
-
-        o,r = pcs("property set --node=rh7-2 IP=192.168.2.2")
-        ac(o,"")
-        assert r==0
-
-        o,r = pcs("property")
-        ac(o,"Cluster Properties:\nNode Attributes:\n rh7-1: IP=192.168.1.1\n rh7-2: IP=192.168.2.2\n")
-        assert r==0
-
-        o,r = pcs("property set --node=rh7-2 IP=")
-        ac(o,"")
-        assert r==0
-
-        o,r = pcs("property")
-        ac(o,"Cluster Properties:\nNode Attributes:\n rh7-1: IP=192.168.1.1\n")
-        assert r==0
-
-        o,r = pcs("property set --node=rh7-1 IP=192.168.1.1")
-        ac(o,"")
-        assert r==0
-
-        o,r = pcs("property set --node=rh7-2 IP=192.168.2.2")
-        ac(o,"")
-        assert r==0
-
-        o,r = pcs("property")
-        ac(o,"Cluster Properties:\nNode Attributes:\n rh7-1: IP=192.168.1.1\n rh7-2: IP=192.168.2.2\n")
-        assert r==0
-
-        o,r = pcs("property unset --node=rh7-1 IP")
-        ac(o,"")
-        assert r==0
-
-        o,r = pcs("property")
-        ac(o,"Cluster Properties:\nNode Attributes:\n rh7-2: IP=192.168.2.2\n")
-        assert r==0
-
-        o,r = pcs("property unset --node=rh7-1 IP")
-        ac(o,"Error: attribute: 'IP' doesn't exist for node: 'rh7-1'\n")
-        assert r==2
-
-        o,r = pcs("property unset --node=rh7-1 IP --force")
-        ac(o,"")
-        assert r==0
-
     def testBadProperties(self):
         o,r = pcs(temp_cib, "property set xxxx=zzzz")
         self.assertEqual(r, 1)
@@ -329,3 +278,205 @@ class PropertyTest(unittest.TestCase):
  default-resource-stickiness: 0.1
 """
         )
+
+
+class NodePropertyTestBase(unittest.TestCase, AssertPcsMixin):
+    def setUp(self):
+        shutil.copy(empty_cib, temp_cib)
+        self.pcs_runner = PcsRunner(temp_cib)
+
+    def fixture_nodes(self, nodes, attrs=None):
+        attrs = dict() if attrs is None else attrs
+        xml_lines = ['<nodes>']
+        for node_id, node_name in enumerate(nodes, 1):
+            xml_lines.extend([
+                '<node id="{0}" uname="{1}">'.format(node_id, node_name),
+                '<instance_attributes id="nodes-{0}">'.format(node_id),
+            ])
+            nv = '<nvpair id="nodes-{id}-{name}" name="{name}" value="{val}"/>'
+            for name, value in attrs.get(node_name, dict()).items():
+                xml_lines.append(nv.format(id=node_id, name=name, val=value))
+            xml_lines.extend([
+                '</instance_attributes>',
+                '</node>'
+            ])
+        xml_lines.append('</nodes>')
+
+        utils.usefile = True
+        utils.filename = temp_cib
+        output, retval = utils.run([
+            "cibadmin", "--modify", '--xml-text', "\n".join(xml_lines)
+        ])
+        assert output == ""
+        assert retval == 0
+
+class NodePropertyShowTest(NodePropertyTestBase):
+    def test_empty(self):
+        self.fixture_nodes(["rh7-1", "rh7-2"])
+        self.assert_pcs_success(
+            "property",
+            "Cluster Properties:\n"
+        )
+
+    def test_nonempty(self):
+        self.fixture_nodes(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_success(
+            "property",
+            """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: IP=192.168.1.1
+ rh7-2: IP=192.168.1.2
+"""
+        )
+
+    def test_multiple_per_node(self):
+        self.fixture_nodes(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
+                "rh7-2": {"IP": "192.168.1.2", "alias": "node2", },
+            }
+        )
+        self.assert_pcs_success(
+            "property",
+            """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: IP=192.168.1.1 alias=node1
+ rh7-2: IP=192.168.1.2 alias=node2
+"""
+        )
+
+    def test_name_filter_not_exists(self):
+        self.fixture_nodes(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_success(
+            "property show alias",
+            """\
+Cluster Properties:
+"""
+        )
+
+    def test_name_filter_exists(self):
+        self.fixture_nodes(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", "alias": "node1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_success(
+            "property show alias",
+            """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: alias=node1
+"""
+        )
+
+class NodePropertySetTest(NodePropertyTestBase):
+    def test_set_new(self):
+        self.fixture_nodes(["rh7-1", "rh7-2"])
+        self.assert_pcs_success(
+            "property set --node=rh7-1 IP=192.168.1.1"
+        )
+        self.assert_pcs_success(
+            "property",
+            """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: IP=192.168.1.1
+"""
+        )
+        self.assert_pcs_success(
+            "property set --node=rh7-2 IP=192.168.1.2"
+        )
+        self.assert_pcs_success(
+            "property",
+            """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: IP=192.168.1.1
+ rh7-2: IP=192.168.1.2
+"""
+        )
+
+    def test_set_existing(self):
+        self.fixture_nodes(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_success(
+            "property set --node=rh7-2 IP=192.168.2.2"
+        )
+        self.assert_pcs_success(
+            "property",
+            """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: IP=192.168.1.1
+ rh7-2: IP=192.168.2.2
+"""
+        )
+
+    def test_unset(self):
+        self.fixture_nodes(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_success(
+            "property set --node=rh7-2 IP="
+        )
+        self.assert_pcs_success(
+            "property",
+            """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: IP=192.168.1.1
+"""
+        )
+
+    def test_unset_nonexisting(self):
+        self.fixture_nodes(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_result(
+            "property unset --node=rh7-1 missing",
+            "Error: attribute: 'missing' doesn't exist for node: 'rh7-1'\n",
+            returncode=2
+        )
+
+    def test_unset_nonexisting_forced(self):
+        self.fixture_nodes(
+            ["rh7-1", "rh7-2"],
+            {
+                "rh7-1": {"IP": "192.168.1.1", },
+                "rh7-2": {"IP": "192.168.1.2", },
+            }
+        )
+        self.assert_pcs_success(
+            "property unset --node=rh7-1 missing --force",
+            ""
+        )
diff --git a/pcs/test/test_quorum.py b/pcs/test/test_quorum.py
index 86de4c6..4f15d7f 100644
--- a/pcs/test/test_quorum.py
+++ b/pcs/test/test_quorum.py
@@ -6,7 +6,7 @@ from __future__ import (
 )
 
 import shutil
-from unittest import TestCase
+from pcs.test.tools.pcs_unittest import TestCase
 
 from pcs.test.tools.assertions import AssertPcsMixin
 from pcs.test.tools.misc import (
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index 2fa5088..556a9c3 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -8,7 +8,7 @@ from __future__ import (
 import os
 import shutil
 import re
-import unittest
+from pcs.test.tools import pcs_unittest as unittest
 
 from pcs.test.tools.assertions import AssertPcsMixin
 from pcs.test.tools.misc import (
@@ -109,7 +109,6 @@ class ResourceTest(unittest.TestCase):
         assert output == "Error: Unable to find resource: bad_resource\n"
 
         output, returnVal = pcs(temp_cib, "resource describe ocf:heartbeat:Dummy")
-        assert returnVal == 0
         ac(output, """\
 ocf:heartbeat:Dummy - Example stateless resource agent
 
@@ -127,10 +126,15 @@ but moderate. The minimum timeouts should never be below 10 seconds.
 Resource options:
   state: Location to store the resource state in.
   fake: Fake attribute that can be changed to cause a reload
+
+Default operations:
+  start: timeout=20
+  stop: timeout=20
+  monitor: interval=10 timeout=20
 """)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource describe Dummy")
-        assert returnVal == 0
         ac(output, """\
 ocf:heartbeat:Dummy - Example stateless resource agent
 
@@ -148,7 +152,13 @@ but moderate. The minimum timeouts should never be below 10 seconds.
 Resource options:
   state: Location to store the resource state in.
   fake: Fake attribute that can be changed to cause a reload
+
+Default operations:
+  start: timeout=20
+  stop: timeout=20
+  monitor: interval=10 timeout=20
 """)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource describe SystemHealth")
         assert returnVal == 0
@@ -158,6 +168,10 @@ ocf:pacemaker:SystemHealth - SystemHealth resource agent
 This is a SystemHealth Resource Agent.  It is used to monitor
 the health of a system via IPMI.
 
+Default operations:
+  start: timeout=20
+  stop: timeout=20
+  monitor: timeout=20
 """)
 
     def testAddResources(self):
@@ -213,8 +227,7 @@ the health of a system via IPMI.
  ClusterIP7\t(ocf::heartbeat:IPaddr2):\tStopped (disabled)
 """)
 
-        output, returnVal = pcs(temp_cib, "resource show ClusterIP6 --full")
-        assert returnVal == 0
+        output, returnVal = pcs(temp_cib, "resource show --full")
         ac(output, """\
  Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
   Attributes: ip=192.168.0.99 cidr_netmask=32
@@ -241,6 +254,7 @@ the health of a system via IPMI.
   Meta Attrs: target-role=Stopped 
   Operations: monitor interval=30s (ClusterIP7-monitor-interval-30s)
 """)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -785,7 +799,7 @@ monitor interval=60s (state-monitor-interval-60s)
         assert returnVal == 0
         assert output == ""
 
-        line = 'resource show ClusterIP --full'
+        line = 'resource show ClusterIP'
         output, returnVal = pcs(temp_cib, line)
         ac(output, """\
  Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
@@ -3463,16 +3477,23 @@ Error: Cannot remove more than one resource from cloned group
         ac(o,"")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource show D1 --full")
-        ac(o," Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n  Meta Attrs: target-role=Stopped \n  Operations: monitor interval=60s (D1-monitor-interval-60s)\n")
+        o,r = pcs(temp_cib, "resource show D1")
+        ac(o, """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+  Meta Attrs: target-role=Stopped 
+  Operations: monitor interval=60s (D1-monitor-interval-60s)
+""")
         assert r == 0
 
         o,r = pcs(temp_cib, "resource enable D1")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs(temp_cib, "resource show D1 --full")
-        ac(o," Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (D1-monitor-interval-60s)\n")
+        o,r = pcs(temp_cib, "resource show D1")
+        ac(o, """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (D1-monitor-interval-60s)
+""")
         assert r == 0
 
         # bad resource name
@@ -4423,6 +4444,24 @@ Resource Utilization:
         self.assertEqual(0, returnVal)
 
     def test_resource_utilization_set_invalid(self):
+        output, returnVal = pcs(
+            temp_large_cib, "resource utilization dummy test"
+        )
+        expected_out = """\
+Error: missing value of 'test' option
+"""
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(
+            temp_large_cib, "resource utilization dummy =10"
+        )
+        expected_out = """\
+Error: missing key in '=10' option
+"""
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
+
         output, returnVal = pcs(temp_large_cib, "resource utilization dummy0")
         expected_out = """\
 Error: Unable to find a resource: dummy0
diff --git a/pcs/test/test_rule.py b/pcs/test/test_rule.py
index 8cf717a..ad3448d 100644
--- a/pcs/test/test_rule.py
+++ b/pcs/test/test_rule.py
@@ -6,7 +6,7 @@ from __future__ import (
 )
 
 import shutil
-import unittest
+from pcs.test.tools import pcs_unittest as unittest
 import xml.dom.minidom
 
 from pcs import rule
diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
index a6ee2f5..82b2c84 100644
--- a/pcs/test/test_stonith.py
+++ b/pcs/test/test_stonith.py
@@ -6,7 +6,7 @@ from __future__ import (
 )
 
 import shutil
-import unittest
+from pcs.test.tools import pcs_unittest as unittest
 
 from pcs.test.tools.misc import (
     ac,
diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
index 819f8ee..c4c6d87 100644
--- a/pcs/test/test_utils.py
+++ b/pcs/test/test_utils.py
@@ -6,7 +6,7 @@ from __future__ import (
 )
 
 import sys
-import unittest
+from pcs.test.tools import pcs_unittest as unittest
 import xml.dom.minidom
 import xml.etree.cElementTree as ET
 from time import sleep
@@ -273,6 +273,9 @@ class UtilsTest(unittest.TestCase):
                             name="remote-node" value="guest2"/>
                     </instance_attributes>
                 </primitive>
+                <primitive id="dummy3"
+                        class="ocf" provider="pacemaker" type="remote">
+                </primitive>
             </resources>
         """).documentElement
         resources = dom.getElementsByTagName("resources")[0]
@@ -296,6 +299,12 @@ class UtilsTest(unittest.TestCase):
                 utils.dom_get_resource(dom, "vm-guest1")
             )
         )
+        self.assertEqual(
+            "dummy3",
+            utils.dom_get_resource_remote_node_name(
+                utils.dom_get_resource(dom, "dummy3")
+            )
+        )
 
     def test_dom_get_meta_attr_value(self):
         dom = self.get_cib_empty()
@@ -1391,12 +1400,12 @@ class UtilsTest(unittest.TestCase):
         """).documentElement
         self.assertRaises(
             SystemExit,
-            utils.dom_update_utilization, el, [("name", "invalid_val")]
+            utils.dom_update_utilization, el, {"name": "invalid_val"}
         )
 
         self.assertRaises(
             SystemExit,
-            utils.dom_update_utilization, el, [("name", "0.01")]
+            utils.dom_update_utilization, el, {"name": "0.01"}
         )
 
         sys.stderr = tmp_stderr
@@ -1406,7 +1415,12 @@ class UtilsTest(unittest.TestCase):
         <resource id="test_id"/>
         """).documentElement
         utils.dom_update_utilization(
-            el, [("name", ""), ("key", "-1"), ("keys", "90")]
+            el,
+            {
+                "name": "",
+                "key": "-1",
+                "keys": "90",
+            }
         )
 
         self.assertEqual(len(dom_get_child_elements(el)), 1)
@@ -1450,7 +1464,11 @@ class UtilsTest(unittest.TestCase):
         </resource>
         """).documentElement
         utils.dom_update_utilization(
-            el, [("key", "100"), ("keys", "")]
+            el,
+            {
+                "key": "100",
+                "keys": "",
+            }
         )
 
         u = dom_get_child_elements(el)[0]
@@ -1797,7 +1815,10 @@ class RunParallelTest(unittest.TestCase):
             wait_seconds=.1
         )
 
-        self.assertEqual(log, ['first', 'second'])
+        self.assertEqual(
+            sorted(log),
+            sorted(['first', 'second'])
+        )
 
     def test_wait_for_slower_workers(self):
         log = []
diff --git a/pcs/test/tools/color_text_runner.py b/pcs/test/tools/color_text_runner.py
index 78a0787..b8383f6 100644
--- a/pcs/test/tools/color_text_runner.py
+++ b/pcs/test/tools/color_text_runner.py
@@ -5,12 +5,7 @@ from __future__ import (
     unicode_literals,
 )
 
-import sys
-major, minor = sys.version_info[:2]
-if major == 2 and minor == 6:
-    import unittest2 as unittest
-else:
-    import unittest
+from pcs.test.tools import pcs_unittest as unittest
 
 
 palete = {
@@ -37,7 +32,7 @@ palete = {
 def apply(key_list, text):
     return("".join([palete[key] for key in key_list]) + text + palete["end"])
 
-TextTestResult = unittest.runner.TextTestResult
+TextTestResult = unittest.TextTestResult
 #pylint: disable=bad-super-call
 class ColorTextTestResult(TextTestResult):
     def addSuccess(self, test):
diff --git a/pcs/test/tools/misc.py b/pcs/test/tools/misc.py
index a78ccdc..745b228 100644
--- a/pcs/test/tools/misc.py
+++ b/pcs/test/tools/misc.py
@@ -10,6 +10,7 @@ import os.path
 import re
 
 from pcs import utils
+from pcs.test.tools.pcs_unittest import mock
 
 
 testdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
@@ -50,3 +51,16 @@ def is_minimum_pacemaker_version(cmajor, cminor, crev):
         or
         (major == cmajor and minor == cminor and rev >= crev)
     )
+
+def create_patcher(target_prefix):
+    """
+    Return a function for patching tests with a preconfigured target prefix.
+    The string target_prefix is a prefix for the patched names, typically
+    the tested module, e.g. "pcs.lib.commands.booth". A "." (dot) is placed
+    between target_prefix and target.
+    """
+    def patch(target, *args, **kwargs):
+        return mock.patch(
+            "{0}.{1}".format(target_prefix, target), *args, **kwargs
+        )
+    return patch
diff --git a/pcs/test/tools/pcs_mock.py b/pcs/test/tools/pcs_mock.py
deleted file mode 100644
index d84ac67..0000000
--- a/pcs/test/tools/pcs_mock.py
+++ /dev/null
@@ -1,13 +0,0 @@
-try:
-    import unittest.mock as mock
-except ImportError:
-    import mock
-
-if not hasattr(mock.Mock, "assert_not_called"):
-    def __assert_not_called(self, *args, **kwargs):
-        if self.call_count != 0:
-            msg = ("Expected '%s' to not have been called. Called %s times." %
-                   (self._mock_name or 'mock', self.call_count))
-            raise AssertionError(msg)
-    mock.Mock.assert_not_called = __assert_not_called
-
diff --git a/pcs/test/tools/pcs_unittest.py b/pcs/test/tools/pcs_unittest.py
new file mode 100644
index 0000000..7b7b37a
--- /dev/null
+++ b/pcs/test/tools/pcs_unittest.py
@@ -0,0 +1,181 @@
+import sys
+#In package unittest there is no module mock before python 3.3. In python 3
+#module mock is not imported by * because module mock is not imported in
+#unittest/__init__.py
+major, minor = sys.version_info[:2]
+if major == 2 and minor == 6:
+    #we use features that are missing before 2.7 (like test skipping,
+    #assertRaises as context manager...) so we need unittest2
+    from unittest2 import *
+    import mock
+else:
+    from unittest import *
+    try:
+        import unittest.mock as mock
+    except ImportError:
+        import mock
+
+#backport of assert_not_called (new in version 3.5)
+if not hasattr(mock.Mock, "assert_not_called"):
+    def __assert_not_called(self, *args, **kwargs):
+        if self.call_count != 0:
+            msg = ("Expected '%s' to not have been called. Called %s times." %
+                   (self._mock_name or 'mock', self.call_count))
+            raise AssertionError(msg)
+    mock.Mock.assert_not_called = __assert_not_called
+
+
+if not hasattr(mock, "mock_open"):
+    def create_mock_open(MagicMock, DEFAULT, inPy3k):
+        """
+        Backport mock_open for older mock versions.
+        Backport is taken from mock package. Original code is slightly adapted:
+         * code is covered by create_mock_open
+         * MagicMock, DEFAULT, and inPy3k flag was originally module globals
+         * file_spec was originally plain value (and in mock_open referenced as
+           global) but we want keep namespace of this module as clean as
+           possible so now is file_spec encapsulated.
+        """
+        file_spec = [None]
+
+        def _iterate_read_data(read_data):
+            # Helper for mock_open:
+            # Retrieve lines from read_data via a generator so that separate
+            # calls to readline, read, and readlines are properly interleaved
+            data_as_list = ['{0}\n'.format(l) for l in read_data.split('\n')]
+
+            if data_as_list[-1] == '\n':
+                # If the last line ended in a newline, the list comprehension
+                # will have an extra entry that's just a newline.  Remove this.
+                data_as_list = data_as_list[:-1]
+            else:
+                # If there wasn't an extra newline by itself, then the file
+                # being emulated doesn't have a newline to end the last line
+                # remove the newline that our naive format() added
+                data_as_list[-1] = data_as_list[-1][:-1]
+
+            for line in data_as_list:
+                yield line
+
+        def mock_open(mock=None, read_data=''):
+            """
+            A helper function to create a mock to replace the use of `open`. It
+            works for `open` called directly or used as a context manager.
+
+            The `mock` argument is the mock object to configure. If `None` (the
+            default) then a `MagicMock` will be created for you, with the API
+            limited to methods or attributes available on standard file
+            handles.
+
+            `read_data` is a string for the `read`, `readline`, and
+            `readlines` methods of the file handle to return.  This is
+            an empty string by default.
+            """
+            def _readlines_side_effect(*args, **kwargs):
+                if handle.readlines.return_value is not None:
+                    return handle.readlines.return_value
+                return list(_state[0])
+
+            def _read_side_effect(*args, **kwargs):
+                if handle.read.return_value is not None:
+                    return handle.read.return_value
+                return ''.join(_state[0])
+
+            def _readline_side_effect():
+                if handle.readline.return_value is not None:
+                    while True:
+                        yield handle.readline.return_value
+                for line in _state[0]:
+                    yield line
+
+            if file_spec[0] is None:
+                # set on first use
+                if inPy3k:
+                    import _io
+                    file_spec[0] = list(
+                        set(dir(_io.TextIOWrapper))
+                            .union(set(dir(_io.BytesIO)))
+                    )
+                else:
+                    file_spec[0] = file
+
+            if mock is None:
+                mock = MagicMock(name='open', spec=open)
+
+            handle = MagicMock(spec=file_spec[0])
+            handle.__enter__.return_value = handle
+
+            _state = [_iterate_read_data(read_data), None]
+
+            handle.write.return_value = None
+            handle.read.return_value = None
+            handle.readline.return_value = None
+            handle.readlines.return_value = None
+
+            handle.read.side_effect = _read_side_effect
+            _state[1] = _readline_side_effect()
+            handle.readline.side_effect = _state[1]
+            handle.readlines.side_effect = _readlines_side_effect
+
+            def reset_data(*args, **kwargs):
+                _state[0] = _iterate_read_data(read_data)
+                if handle.readline.side_effect == _state[1]:
+                    # Only reset the side effect if the user hasn't overridden
+                    # it.
+                    _state[1] = _readline_side_effect()
+                    handle.readline.side_effect = _state[1]
+                return DEFAULT
+
+            mock.side_effect = reset_data
+            mock.return_value = handle
+            return mock
+        return mock_open
+
+    mock.mock_open = create_mock_open(
+        mock.MagicMock,
+        mock.DEFAULT,
+        inPy3k=(major==3),
+    )
+    del create_mock_open
+
+def ensure_raise_from_iterable_side_effect():
+    """
+    Adjust mock.Mock to raise when side_effect is iterable, the effect item
+    applied in the current call is an exception (class or instance), and this
+    exception would simply be returned (in older versions of mock).
+    """
+    def create_new_call(old_call, inPy3k):
+        class OldStyleClass:
+            pass
+        ClassTypes = (type,) if inPy3k else (type, type(OldStyleClass))
+
+        def is_exception(obj):
+            return isinstance(obj, BaseException) or (
+                isinstance(obj, ClassTypes)
+                and
+                issubclass(obj, BaseException)
+            )
+
+        def new_call(_mock_self, *args, **kwargs):
+            """
+            Wrap original call.
+            If side_effect is iterable and the result is an exception then we
+            raise this exception. Newer versions of mock do this themselves (so
+            in that case the exception is raised from old_call) but we need it
+            for the old versions as well.
+            """
+            call_result = old_call(_mock_self, *args, **kwargs)
+            try:
+                iter(_mock_self.side_effect)
+            except TypeError:
+                return call_result
+
+            if is_exception(call_result):
+                raise call_result
+            return call_result
+
+        return new_call
+    mock.Mock.__call__ = create_new_call(mock.Mock.__call__, inPy3k=(major==3))
+ensure_raise_from_iterable_side_effect()
+
+del major, minor, sys
diff --git a/pcs/usage.py b/pcs/usage.py
index ee53a2f..ea407c3 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -21,6 +21,7 @@ def full_usage():
     out += strip_extras(acl([],False))
     out += strip_extras(qdevice([],False))
     out += strip_extras(quorum([],False))
+    out += strip_extras(booth([],False))
     out += strip_extras(status([],False))
     out += strip_extras(config([],False))
     out += strip_extras(pcsd([],False))
@@ -117,6 +118,7 @@ def generate_completion_tree_from_usage():
     tree["pcsd"] = generate_tree(pcsd([],False))
     tree["node"] = generate_tree(node([], False))
     tree["alert"] = generate_tree(alert([], False))
+    tree["booth"] = generate_tree(booth([], False))
     return tree
 
 def generate_tree(usage_txt):
@@ -167,6 +169,7 @@ Commands:
     acl         Set pacemaker access control lists.
     qdevice     Manage quorum device provider.
     quorum      Manage cluster quorum settings.
+    booth       Manage booth (cluster ticket manager).
     status      View cluster status.
     config      View and manage cluster configuration.
     pcsd        Manage pcs daemon.
@@ -187,12 +190,12 @@ Usage: pcs resource [commands]...
 Manage pacemaker resources
 
 Commands:
-    [show [resource id]] [--full] [--groups]
+    [show [<resource id>] | --full | --groups | --hide-inactive]
         Show all currently configured resources or if a resource is specified
-        show the options for the configured resource.  If --full is specified
+        show the options for the configured resource.  If --full is specified,
         all configured resource options will be displayed.  If --groups is
-        specified, only show groups (and their resources).
-
+        specified, only show groups (and their resources).  If --hide-inactive
+        is specified, only show active resources.
 
     list [<standard|provider|type>] [--nodesc]
         Show list of all available resources, optionally filtered by specified
@@ -681,6 +684,7 @@ Commands:
         the whole CIB or be warned in the case of outdated CIB.
 
     node add <node[,node-altaddr]> [--start [--wait[=<n>]]] [--enable]
+            [--watchdog=<watchdog-path>]
         Add the node to corosync.conf and corosync on all nodes in the cluster
         and sync the new corosync.conf to the new node.  If --start is
         specified also start corosync/pacemaker on the new node, if --wait is
@@ -688,6 +692,8 @@ Commands:
         is specified enable corosync/pacemaker on new node.
         When using Redundant Ring Protocol (RRP) with udpu transport, specify
         the ring 0 address first followed by a ',' and then the ring 1 address.
+        Use --watchdog to specify path to watchdog on newly added node, when SBD
+        is enabled in cluster.
 
     node remove <node>
         Shutdown specified node and remove it from pacemaker and corosync on
@@ -990,21 +996,24 @@ Commands:
         List all current ticket constraints (if --full is specified show
         the internal constraint id's as well).
 
-    ticket add <ticket> [<role>] <resource id> [options]
-               [id=constraint-id]
+    ticket add <ticket> [<role>] <resource id> [<options>]
+               [id=<constraint-id>]
         Create a ticket constraint for <resource id>.
         Available option is loss-policy=fence/stop/freeze/demote.
         A role can be master, slave, started or stopped.
 
-    ticket set <resource1> [resourceN]... [options]
-               [set <resourceX> ... [options]]
-               [setoptions [constraint_options]]
+    ticket set <resource1> [<resourceN>]... [<options>]
+               [set <resourceX> ... [<options>]]
+               setoptions <constraint_options>
         Create a ticket constraint with a resource set.
         Available options are sequential=true/false, require-all=true/false,
         action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.
         Required constraint option is ticket=<ticket>. Optional constraint
         options are id=<constraint-id> and loss-policy=fence/stop/freeze/demote.
 
+    ticket remove <ticket> <resource id>
+        Remove all ticket constraints with <ticket> from <resource id>.
+
     remove [constraint id]...
         Remove constraint(s) or constraint rules with the specified id(s).
 
@@ -1106,8 +1115,12 @@ Commands:
         View all information about the cluster and resources (--full provides
         more details, --hide-inactive hides inactive resources).
 
-    resources
-        View current status of cluster resources.
+    resources [<resource id> | --full | --groups | --hide-inactive]
+        Show all currently configured resources or if a resource is specified
+        show the options for the configured resource.  If --full is specified,
+        all configured resource options will be displayed.  If --groups is
+        specified, only show groups (and their resources).  If --hide-inactive
+        is specified, only show active resources.
 
     groups
         View currently configured groups and their resources.
@@ -1118,6 +1131,14 @@ Commands:
     corosync
         View current membership information as seen by corosync.
 
+    quorum
+        View current quorum status.
+
+    qdevice <device model> [--full] [<cluster name>]
+        Show runtime status of specified model of quorum device provider.  Using
+        --full will give more detailed output.  If <cluster name> is specified,
+        only information about the specified cluster will be displayed.
+
     nodes [corosync|both|config]
         View current status of nodes from pacemaker. If 'corosync' is
         specified, print nodes currently configured in corosync, if 'both'
@@ -1165,7 +1186,7 @@ Commands:
         Restore cluster configuration to specified checkpoint.
 
     import-cman output=<filename> [input=<filename>] [--interactive]
-            [output-format=corosync.conf|cluster.conf]
+            [output-format=corosync.conf|cluster.conf] [dist=<dist>]
         Converts CMAN cluster configuration to Pacemaker cluster configuration.
         Converted configuration will be saved to 'output' file.  To send
         the configuration to the cluster nodes the 'pcs config restore'
@@ -1173,20 +1194,30 @@ Commands:
         prompted to solve incompatibilities manually.  If no input is specified
         /etc/cluster/cluster.conf will be used.  You can force to create output
         containing either cluster.conf or corosync.conf using the output-format
-        option.
+        option.  Optionally you can specify output version by setting 'dist'
+        option e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You
+        can get the list of supported dist values by running the "clufter
+        --list-dists" command.  If 'dist' is not specified, it defaults to this
+        node's version if that matches output-format, otherwise redhat,6.7 is
+        used for cluster.conf and redhat,7.1 is used for corosync.conf.
 
     import-cman output=<filename> [input=<filename>] [--interactive]
-            output-format=pcs-commands|pcs-commands-verbose
+            output-format=pcs-commands|pcs-commands-verbose [dist=<dist>]
         Converts CMAN cluster configuration to a list of pcs commands which
         recreates the same cluster as Pacemaker cluster when executed.  Commands
         will be saved to 'output' file.  For other options see above.
 
-    export pcs-commands|pcs-commands-verbose output=<filename>
+    export pcs-commands|pcs-commands-verbose [output=<filename>] [dist=<dist>]
         Creates a list of pcs commands which upon execution recreates
         the current cluster running on this node.  Commands will be saved
-        to 'output' file.  Use pcs-commands to get a simple list of commands,
-        whereas pcs-commands-verbose creates a list including comments and debug
-        messages.
+        to 'output' file or written to stdout if 'output' is not specified.  Use
+        pcs-commands to get a simple list of commands, whereas
+        pcs-commands-verbose creates a list including comments and debug
+        messages.  Optionally specify output version by setting 'dist' option
+        e. g. rhel,6.8 or redhat,7.3 or debian,7 or ubuntu,trusty.  You can get
+        the list of supported dist values by running the "clufter --list-dists"
+        command.  If 'dist' is not specified, it defaults to this node's
+        version.
 """
     if pout:
         print(sub_usage(args, output))
@@ -1227,6 +1258,14 @@ Usage: pcs node <command>
 Manage cluster nodes
 
 Commands:
+    attribute [[<node>] [--name <name>] | <node> <name>=<value> ...]
+        Manage node attributes.  If no parameters are specified, show attributes
+        of all nodes.  If one parameter is specified, show attributes
+        of specified node.  If --name is specified, show specified attribute's
+        value from all nodes.  If more parameters are specified, set attributes
+        of specified node.  Attributes can be removed by setting an attribute
+        without a value.
+
     maintenance [--all] | [<node>]...
         Put specified node(s) into maintenance mode, if no node or options are
         specified the current node will be put into maintenance mode, if --all
@@ -1257,12 +1296,13 @@ Commands:
         the operation not succeeded yet.  If 'n' is not specified it defaults
         to 60 minutes.
 
-    utilization [<node> [<name>=<value> ...]]
-        Add specified utilization options to specified node. If node is not
-        specified, shows utilization of all nodes. If utilization options are
-        not specified, shows utilization of specified node. Utilization option
-        should be in format name=value, value has to be integer. Options may be
-        removed by setting an option without a value.
+    utilization [[<node>] [--name <name>] | <node> <name>=<value> ...]
+        Add specified utilization options to specified node.  If node is not
+        specified, shows utilization of all nodes.  If --name is specified,
+        shows specified utilization value from all nodes. If utilization options
+        are not specified, shows utilization of specified node.  Utilization
+        option should be in format name=value, value has to be integer.  Options
+        may be removed by setting an option without a value.
         Example: pcs node utilization node1 cpu=4 ram=
 """
     if pout:
@@ -1322,7 +1362,7 @@ Usage: pcs quorum <command>
 Manage cluster quorum settings.
 
 Commands:
-    config
+    [config]
         Show quorum configuration.
 
     status
@@ -1380,6 +1420,82 @@ Commands:
     else:
         return output
 
+def booth(args=[], pout=True):
+    output = """
+Usage: pcs booth <command>
+Manage booth (cluster ticket manager)
+
+Commands:
+    setup sites <address> <address> [<address>...] [arbitrators <address> ...]
+            [--force]
+        Write new booth configuration with specified sites and arbitrators.
+        Total number of peers (sites and arbitrators) must be odd.  When
+        the configuration file already exists, command fails unless --force
+        is specified.
+
+    destroy
+        Remove booth configuration files.
+
+    ticket add <ticket> [<name>=<value> ...]
+        Add new ticket to the current configuration. Ticket options are
+        specified in booth manpage.
+
+    ticket remove <ticket>
+        Remove the specified ticket from the current configuration.
+
+    config [<node>]
+        Show booth configuration from the specified node or from the current
+        node if node not specified.
+
+    create ip <address>
+        Make the cluster run booth service on the specified ip address as
+        a cluster resource.  Typically this is used to run booth site.
+
+    remove
+        Remove booth resources created by the "pcs booth create" command.
+
+    restart
+        Restart booth resources created by the "pcs booth create" command.
+
+    ticket grant <ticket> [<site address>]
+        Grant the ticket for the site specified by address.  Site address which
+        has been specified with 'pcs booth create' command is used if
+        'site address' is omitted.  Specifying site address is mandatory when
+        running this command on an arbitrator.
+
+    ticket revoke <ticket> [<site address>]
+        Revoke the ticket for the site specified by address.  Site address which
+        has been specified with 'pcs booth create' command is used if
+        'site address' is omitted.  Specifying site address is mandatory when
+        running this command on an arbitrator.
+
+    status
+        Print current status of booth on the local node.
+
+    pull <node>
+        Pull booth configuration from the specified node.
+
+    sync [--skip-offline]
+        Send booth configuration from the local node to all nodes
+        in the cluster.
+
+    enable
+        Enable booth arbitrator service.
+
+    disable
+        Disable booth arbitrator service.
+
+    start
+        Start booth arbitrator service.
+
+    stop
+        Stop booth arbitrator service.
+"""
+    if pout:
+        print(sub_usage(args, output))
+    else:
+        return output
+
 
 def alert(args=[], pout=True):
     output = """
@@ -1392,25 +1508,27 @@ Commands:
 
     create path=<path> [id=<alert-id>] [description=<description>]
             [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-        Create new alert with specified path. Id will be automatically
+        Define an alert handler with specified path. Id will be automatically
         generated if it is not specified.
 
     update <alert-id> [path=<path>] [description=<description>]
             [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-        Update existing alert with specified id.
+        Update existing alert handler with specified id.
 
     remove <alert-id>
-        Remove alert with specified id.
+        Remove alert handler with specified id.
 
-    recipient add <alert-id> <recipient-value> [description=<description>]
-            [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-        Add new recipient to specified alert.
+    recipient add <alert-id> value=<recipient-value> [id=<recipient-id>]
+            [description=<description>] [options [<option>=<value>]...]
+            [meta [<meta-option>=<value>]...]
+        Add new recipient to specified alert handler.
 
-    recipient update <alert-id> <recipient-value> [description=<description>]
-            [options [<option>=<value>]...] [meta [<meta-option>=<value>]...]
-        Update existing recipient identified by alert and it's value.
+    recipient update <recipient-id> [value=<recipient-value>]
+            [description=<description>] [options [<option>=<value>]...]
+            [meta [<meta-option>=<value>]...]
+        Update existing recipient identified by it's id.
 
-    recipient remove <alert-id> <recipient-value>
+    recipient remove <recipient-id>
         Remove specified recipient.
 """
     if pout:
@@ -1431,6 +1549,7 @@ def show(main_usage_name, rest_usage_names):
         "property": property,
         "qdevice": qdevice,
         "quorum": quorum,
+        "booth": booth,
         "resource": resource,
         "status": status,
         "stonith": stonith,
diff --git a/pcs/utils.py b/pcs/utils.py
index 171fbdd..1e99bc9 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -25,35 +25,6 @@ import base64
 import threading
 import logging
 
-try:
-    # python2
-    from urllib import urlencode as urllib_urlencode
-except ImportError:
-    # python3
-    from urllib.parse import urlencode as urllib_urlencode
-try:
-    # python2
-    from urllib2 import (
-        build_opener as urllib_build_opener,
-        install_opener as urllib_install_opener,
-        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
-        HTTPSHandler as urllib_HTTPSHandler,
-        HTTPError as urllib_HTTPError,
-        URLError as urllib_URLError
-    )
-except ImportError:
-    # python3
-    from urllib.request import (
-        build_opener as urllib_build_opener,
-        install_opener as urllib_install_opener,
-        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
-        HTTPSHandler as urllib_HTTPSHandler
-    )
-    from urllib.error import (
-        HTTPError as urllib_HTTPError,
-        URLError as urllib_URLError
-    )
-
 
 from pcs import settings, usage
 from pcs.cli.common.reports import (
@@ -61,7 +32,7 @@ from pcs.cli.common.reports import (
     LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
 )
 from pcs.common.tools import simple_cache
-from pcs.lib import reports
+from pcs.lib import reports, sbd
 from pcs.lib.env import LibraryEnvironment
 from pcs.lib.errors import LibraryError
 from pcs.lib.external import (
@@ -89,10 +60,46 @@ from pcs.lib.pacemaker_values import(
 from pcs.cli.common import middleware
 from pcs.cli.common.env import Env
 from pcs.cli.common.lib_wrapper import Library
+from pcs.cli.booth.command import DEFAULT_BOOTH_NAME
+import pcs.cli.booth.env
+
+
+try:
+    # python2
+    from urllib import urlencode as urllib_urlencode
+except ImportError:
+    # python3
+    from urllib.parse import urlencode as urllib_urlencode
+try:
+    # python2
+    from urllib2 import (
+        build_opener as urllib_build_opener,
+        install_opener as urllib_install_opener,
+        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
+        HTTPSHandler as urllib_HTTPSHandler,
+        HTTPError as urllib_HTTPError,
+        URLError as urllib_URLError
+    )
+except ImportError:
+    # python3
+    from urllib.request import (
+        build_opener as urllib_build_opener,
+        install_opener as urllib_install_opener,
+        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
+        HTTPSHandler as urllib_HTTPSHandler
+    )
+    from urllib.error import (
+        HTTPError as urllib_HTTPError,
+        URLError as urllib_URLError
+    )
+
+
 
 
 PYTHON2 = sys.version[0] == "2"
 
+DEFAULT_RESOURCE_ACTIONS = ["monitor", "start", "stop", "promote", "demote"]
+
 # usefile & filename variables are set in pcs module
 usefile = False
 filename = ""
@@ -301,6 +308,8 @@ def canAddNodeToCluster(node):
                 return (False, "unable to authenticate to node")
             if "node_available" in myout and myout["node_available"] == True:
                 return (True, "")
+            elif myout.get("pacemaker_remote", False):
+                return (False, "node is running pacemaker_remote")
             else:
                 return (False, "node is already in a cluster")
         except ValueError:
@@ -384,7 +393,8 @@ def sendHTTPRequest(host, request, data = None, printResult = True, printSuccess
                 if "CIB_user" == name:
                     value = re.sub(r"[^!-~]", "", value).replace(";", "")
                 else:
-                    value = base64.b64encode(value)
+                    # python3 requires the value to be bytes not str
+                    value = base64.b64encode(value.encode("utf8"))
                 cookies.append("{0}={1}".format(name, value))
     if cookies:
         opener.addheaders.append(('Cookie', ";".join(cookies)))
@@ -465,6 +475,24 @@ def getNodesFromPacemaker():
     except LibraryError as e:
         process_library_reports(e.args)
 
+def getNodeAttributesFromPacemaker():
+    try:
+        return [
+            node.attrs
+            for node in ClusterState(getClusterStateXml()).node_section.nodes
+        ]
+    except LibraryError as e:
+        process_library_reports(e.args)
+
+
+def hasCorosyncConf(conf=None):
+    if not conf:
+        if is_rhel6():
+            conf = settings.cluster_conf_file
+        else:
+            conf = settings.corosync_conf_file
+    return os.path.isfile(conf)
+
 def getCorosyncConf(conf=None):
     if not conf:
         if is_rhel6():
@@ -559,6 +587,23 @@ def getCorosyncActiveNodes():
 
     return nodes_active
 
+
+def _enable_auto_tie_breaker_for_sbd(corosync_conf):
+    """
+    Enable auto tie breaker in specified corosync conf if it is needed by SBD.
+
+    corosync_conf -- parsed corosync conf
+    """
+    try:
+        corosync_facade = corosync_conf_facade(corosync_conf)
+        if sbd.atb_has_to_be_enabled(cmd_runner(), corosync_facade):
+            corosync_facade.set_quorum_options(
+                get_report_processor(), {"auto_tie_breaker": "1"}
+            )
+    except LibraryError as e:
+        process_library_reports(e.args)
+
+
 # Add node specified to corosync.conf and reload corosync.conf (if running)
 def addNodeToCorosync(node):
 # Before adding, make sure node isn't already in corosync.conf
@@ -585,6 +630,9 @@ def addNodeToCorosync(node):
         new_node.add_attribute("ring1_addr", node1)
     new_node.add_attribute("nodeid", new_nodeid)
 
+    # enable ATB if it's needed
+    _enable_auto_tie_breaker_for_sbd(corosync_conf)
+
     corosync_conf = autoset_2node_corosync(corosync_conf)
     setCorosyncConf(str(corosync_conf))
     return True
@@ -612,12 +660,40 @@ def addNodeToClusterConf(node):
                 "error adding alternative address for node: %s" % node0
             )
 
+    # ensure the pacemaker fence device exists
+    pcmk_fence_name = None
+    all_fence_names = set()
+    output, retval = run([
+        "ccs", "-i", "-f", settings.cluster_conf_file, "--lsfencedev"
+    ])
+    if retval == 0:
+        for line in output.splitlines():
+            fence_name, fence_args = line.split(":", 1)
+            all_fence_names.add(fence_name)
+            match = re.match("(^|(.* ))agent=fence_pcmk((,.+)|$)", line)
+            if match:
+                pcmk_fence_name = fence_name
+    if not pcmk_fence_name:
+        fence_index = 1
+        pcmk_fence_name = "pcmk-redirect"
+        while pcmk_fence_name in all_fence_names:
+            pcmk_fence_name = "pcmk-redirect-{0}".format(fence_index)
+            fence_index += 1
+
+        output, retval = run([
+            "ccs", "-i", "-f", settings.cluster_conf_file,
+            "--addfencedev", pcmk_fence_name, "agent=fence_pcmk",
+        ])
+        if retval != 0:
+            print(output)
+            err("error fence device for node: %s" % node)
+
     output, retval = run(["ccs", "-i", "-f", settings.cluster_conf_file, "--addmethod", "pcmk-method", node0])
     if retval != 0:
         print(output)
         err("error adding fence method: %s" % node)
 
-    output, retval = run(["ccs", "-i", "-f", settings.cluster_conf_file, "--addfenceinst", "pcmk-redirect", node0, "pcmk-method", "port="+node0])
+    output, retval = run(["ccs", "-i", "-f", settings.cluster_conf_file, "--addfenceinst", pcmk_fence_name, node0, "pcmk-method", "port="+node0])
     if retval != 0:
         print(output)
         err("error adding fence instance: %s" % node)
@@ -652,6 +728,9 @@ def removeNodeFromCorosync(node):
                     removed_node = True
 
     if removed_node:
+        # enable ATB if it's needed
+        _enable_auto_tie_breaker_for_sbd(corosync_conf)
+
         corosync_conf = autoset_2node_corosync(corosync_conf)
         setCorosyncConf(str(corosync_conf))
 
@@ -1071,18 +1150,6 @@ def does_exist(xpath_query):
         return False
     return True
 
-def is_pacemaker_node(node):
-    p_nodes = getNodesFromPacemaker()
-    if node in p_nodes:
-        return True
-    return False
-
-def is_corosync_node(node):
-    c_nodes = getNodesFromCorosyncConf()
-    if node in c_nodes:
-        return True
-    return False
-
 def get_group_children(group_id):
     child_resources = []
     dom = get_cib_dom()
@@ -1254,6 +1321,14 @@ def validate_constraint_resource(dom, resource_id):
 def dom_get_resource_remote_node_name(dom_resource):
     if dom_resource.tagName != "primitive":
         return None
+    if (
+        dom_resource.getAttribute("class").lower() == "ocf"
+        and
+        dom_resource.getAttribute("provider").lower() == "pacemaker"
+        and
+        dom_resource.getAttribute("type").lower() == "remote"
+    ):
+        return dom_resource.getAttribute("id")
     return dom_get_meta_attr_value(dom_resource, "remote-node")
 
 def dom_get_meta_attr_value(dom_resource, meta_name):
@@ -1400,21 +1475,33 @@ def does_resource_have_options(ra_type):
         return True
     return False
 
+def filter_default_op_from_actions(resource_actions):
+    filtered = []
+    for action in resource_actions:
+        if action.get("name", "") not in DEFAULT_RESOURCE_ACTIONS:
+            continue
+        new_action = dict([
+            (name, value)
+            for name, value in sorted(action.items())
+            if name != "depth"
+        ])
+        filtered.append(new_action)
+    return filtered
+
 # Given a resource agent (ocf:heartbeat:XXX) return an list of default
 # operations or an empty list if unable to find any default operations
 def get_default_op_values(ra_type):
-    allowable_operations = ["monitor", "start", "stop", "promote", "demote"]
     default_ops = []
     try:
         metadata = lib_ra.get_resource_agent_metadata(cmd_runner(), ra_type)
-        actions = lib_ra.get_agent_actions(metadata)
+        actions = filter_default_op_from_actions(
+            lib_ra.get_agent_actions(metadata)
+        )
 
         for action in actions:
-            if action["name"] not in allowable_operations:
-                continue
             op = [action["name"]]
             for key in action.keys():
-                if key != "name" and (key != "depth" or action[key] != "0"):
+                if key != "name" and action[key] != "0":
                     op.append("{0}={1}".format(key, action[key]))
             default_ops.append(op)
     except (
@@ -1591,15 +1678,35 @@ def is_valid_cib_scope(scope):
 # Checks to see if id exists in the xml dom passed
 # DEPRECATED use lxml version available in pcs.lib.cib.tools
 def does_id_exist(dom, check_id):
+    # do not search in /cib/status, it may contain references to previously
+    # existing and deleted resources, thus preventing them from being created again
     if is_etree(dom):
-        for elem in dom.findall(str(".//*")):
+        for elem in dom.findall(str(
+            '(/cib/*[name()!="status"]|/*[name()!="cib"])/*'
+        )):
             if elem.get("id") == check_id:
                 return True
     else:
-        all_elem = dom.getElementsByTagName("*")
-        for elem in all_elem:
-            if elem.getAttribute("id") == check_id:
-                return True
+        document = (
+            dom
+            if isinstance(dom, xml.dom.minidom.Document)
+            else dom.ownerDocument
+        )
+        cib_found = False
+        for cib in dom_get_children_by_tag_name(document, "cib"):
+            cib_found = True
+            for section in cib.childNodes:
+                if section.nodeType != xml.dom.minidom.Node.ELEMENT_NODE:
+                    continue
+                if section.tagName == "status":
+                    continue
+                for elem in section.getElementsByTagName("*"):
+                    if elem.getAttribute("id") == check_id:
+                        return True
+        if not cib_found:
+            for elem in document.getElementsByTagName("*"):
+                if elem.getAttribute("id") == check_id:
+                    return True
     return False
 
 # Returns check_id if it doesn't exist in the dom, otherwise it adds an integer
@@ -1661,19 +1768,26 @@ def set_unmanaged(resource):
             "is-managed", "--meta", "--parameter-value", "false"]
     return run(args)
 
-def get_node_attributes():
+def get_node_attributes(filter_node=None, filter_attr=None):
     node_config = get_cib_xpath("//nodes")
-    nas = {}
     if (node_config == ""):
         err("unable to get crm_config, is pacemaker running?")
     dom = parseString(node_config).documentElement
+    nas = dict()
     for node in dom.getElementsByTagName("node"):
         nodename = node.getAttribute("uname")
+        if filter_node is not None and nodename != filter_node:
+            continue
         for attributes in node.getElementsByTagName("instance_attributes"):
             for nvp in attributes.getElementsByTagName("nvpair"):
+                attr_name = nvp.getAttribute("name")
+                if filter_attr is not None and attr_name != filter_attr:
+                    continue
                 if nodename not in nas:
-                    nas[nodename] = []
-                nas[nodename].append(nvp.getAttribute("name") + "=" + nvp.getAttribute("value"))
+                    nas[nodename] = dict()
+                nas[nodename][attr_name] = nvp.getAttribute("value")
+            # Use just the first element of attributes. We don't support
+            # attributes with rules just yet.
             break
     return nas
 
@@ -1727,7 +1841,7 @@ def set_cib_property(prop, value, cib_dom=None):
     if update_cib:
         replace_cib_configuration(crm_config)
 
-def setAttribute(a_type, a_name, a_value):
+def setAttribute(a_type, a_name, a_value, exit_on_error=False):
     args = ["crm_attribute", "--type", a_type, "--attr-name", a_name,
             "--attr-value", a_value]
 
@@ -1736,7 +1850,10 @@ def setAttribute(a_type, a_name, a_value):
 
     output, retval = run(args)
     if retval != 0:
-        print(output)
+        if exit_on_error:
+            err(output)
+        else:
+            print(output)
 
 def getTerminalSize(fd=1):
     """
@@ -1763,14 +1880,24 @@ def get_terminal_input(message=None):
     if message:
         sys.stdout.write(message)
         sys.stdout.flush()
-    if PYTHON2:
-        return raw_input("")
-    else:
-        return input("")
+    try:
+        if PYTHON2:
+            return raw_input("")
+        else:
+            return input("")
+    except EOFError:
+        return ""
+    except KeyboardInterrupt:
+        print("Interrupted")
+        sys.exit(1)
 
 def get_terminal_password(message="Password: "):
-    if sys.stdout.isatty():
-        return getpass.getpass(message)
+    if sys.stdin.isatty():
+        try:
+            return getpass.getpass(message)
+        except KeyboardInterrupt:
+            print("Interrupted")
+            sys.exit(1)
     else:
         return get_terminal_input(message)
 
@@ -1798,22 +1925,26 @@ def stonithCheck():
                         prop.attrib["value"] == "false":
                     return False
 
-    primitives = et.findall(str("configuration/resources/primitive"))
-    for p in primitives:
-        if p.attrib["class"] == "stonith":
-            return False
-
-    primitives = et.findall(str("configuration/resources/clone/primitive"))
-    for p in primitives:
-        if p.attrib["class"] == "stonith":
-            return False
+    xpath_list = (
+        "configuration/resources/primitive",
+        "configuration/resources/group/primitive",
+        "configuration/resources/clone/primitive",
+        "configuration/resources/clone/group/primitive",
+        "configuration/resources/master/primitive",
+        "configuration/resources/master/group/primitive",
+    )
+    for xpath in xpath_list:
+        for p in et.findall(str(xpath)):
+            if ("class" in p.attrib) and (p.attrib["class"] == "stonith"):
+                return False
 
-    # check if SBD daemon is running
-    try:
-        if is_service_running(cmd_runner(), "sbd"):
-            return False
-    except LibraryError:
-        pass
+    if not usefile:
+        # check if SBD daemon is running
+        try:
+            if is_service_running(cmd_runner(), "sbd"):
+                return False
+        except LibraryError:
+            pass
 
     return True
 
@@ -1837,7 +1968,7 @@ def getCorosyncNodesID(allow_failure=False):
         err_msgs, retval, output, dummy_std_err = call_local_pcsd(
             ['status', 'nodes', 'corosync-id'], True
         )
-        if err_msgs:
+        if err_msgs and not allow_failure:
             for msg in err_msgs:
                 err(msg, False)
             sys.exit(1)
@@ -1851,18 +1982,21 @@ def getCorosyncNodesID(allow_failure=False):
     cs_nodes = {}
     node_list_node_mapping = {}
     for line in output.rstrip().split("\n"):
-        m = re.match("nodelist.node.(\d+).nodeid.*= (.*)",line)
+        m = re.match("nodelist\.node\.(\d+)\.nodeid.*= (.*)", line)
         if m:
             node_list_node_mapping[m.group(1)] = m.group(2)
 
     for line in output.rstrip().split("\n"):
-        m = re.match("nodelist.node.(\d+).ring0_addr.*= (.*)",line)
-        if m:
+        m = re.match("nodelist\.node\.(\d+)\.ring0_addr.*= (.*)", line)
+        # check if node id is in node_list_node_mapping - do not crash when
+        # node ids are not specified
+        if m and m.group(1) in node_list_node_mapping:
             cs_nodes[node_list_node_mapping[m.group(1)]] = m.group(2)
     return cs_nodes
 
 # Warning, if a node has never started the hostname may be '(null)'
 #TODO This doesn't work on CMAN clusters at all and should be removed completely
+# Doesn't work on pacemaker-remote nodes either
 def getPacemakerNodesID(allow_failure=False):
     if os.getuid() == 0:
         (output, retval) = run(['crm_node', '-l'])
@@ -1870,7 +2004,7 @@ def getPacemakerNodesID(allow_failure=False):
         err_msgs, retval, output, dummy_std_err = call_local_pcsd(
             ['status', 'nodes', 'pacemaker-id'], True
         )
-        if err_msgs:
+        if err_msgs and not allow_failure:
             for msg in err_msgs:
                 err(msg, False)
             sys.exit(1)
@@ -1890,9 +2024,11 @@ def getPacemakerNodesID(allow_failure=False):
     return pm_nodes
 
 def corosyncPacemakerNodeCheck():
-    # does not work on CMAN clusters
-    pm_nodes = getPacemakerNodesID()
-    cs_nodes = getCorosyncNodesID()
+    # does not work on CMAN clusters and pacemaker-remote nodes
+    # we do not want a failure to exit pcs as this is only a minor information
+    # function
+    pm_nodes = getPacemakerNodesID(allow_failure=True)
+    cs_nodes = getCorosyncNodesID(allow_failure=True)
 
     for node_id in pm_nodes:
         if pm_nodes[node_id] == "(null)":
@@ -1917,10 +2053,9 @@ def getClusterName():
     if is_rhel6():
         try:
             dom = parse(settings.cluster_conf_file)
+            return dom.documentElement.getAttribute("name")
         except (IOError,xml.parsers.expat.ExpatError):
-            return ""
-
-        return dom.documentElement.getAttribute("name")
+            pass
     else:
         try:
             f = open(settings.corosync_conf_file,'r')
@@ -1934,7 +2069,15 @@ def getClusterName():
             if cluster_name:
                 return cluster_name
         except (IOError, corosync_conf_parser.CorosyncConfParserException):
-            return ""
+            pass
+
+    # there is no corosync.conf or cluster.conf on remote nodes, so we can try to
+    # get cluster name from pacemaker
+    try:
+        return get_set_properties("cluster-name")["cluster-name"]
+    except:
+        # we need to catch SystemExit (from utils.err), parse errors and so on
+        pass
 
     return ""
 
@@ -2021,23 +2164,30 @@ def serviceStatus(prefix):
     if not is_systemctl():
         return
     print("Daemon Status:")
-    for service in ["corosync", "pacemaker", "pcsd"]:
-        print('{0}{1}: {2}/{3}'.format(
-            prefix, service,
-            run(["systemctl", 'is-active', service])[0].strip(),
-            run(["systemctl", 'is-enabled', service])[0].strip()
-        ))
-    try:
-        sbd_running = is_service_running(cmd_runner(), "sbd")
-        sbd_enabled = is_service_enabled(cmd_runner(), "sbd")
-        if sbd_enabled or sbd_running:
-            print("{prefix}sbd: {active}/{enabled}".format(
-                prefix=prefix,
-                active=("active" if sbd_running else "inactive"),
-                enabled=("enabled" if sbd_enabled else "disabled")
-            ))
-    except LibraryError:
-        pass
+    service_def = [
+        # (
+        #     service name,
+        #     display even if not enabled nor running
+        # )
+        ("corosync", True),
+        ("pacemaker", True),
+        ("pacemaker_remote", False),
+        ("pcsd", True),
+        ("sbd", False),
+    ]
+    for service, display_always in service_def:
+        try:
+            running = is_service_running(cmd_runner(), service)
+            enabled = is_service_enabled(cmd_runner(), service)
+            if display_always or enabled or running:
+                print("{prefix}{service}: {active}/{enabled}".format(
+                    prefix=prefix,
+                    service=service,
+                    active=("active" if running else "inactive"),
+                    enabled=("enabled" if enabled else "disabled")
+                ))
+        except LibraryError:
+            pass
 
 def enableServices():
     # do NOT handle SBD in here, it is started by pacemaker not systemd or init
@@ -2398,7 +2548,7 @@ def dom_update_utilization(dom_element, attributes, id_prefix=""):
         id_prefix + dom_element.getAttribute("id") + "-utilization"
     )
 
-    for name, value in attributes:
+    for name, value in sorted(attributes.items()):
         if value != "" and not is_int(value):
             err(
                 "Value of utilization attribute must be integer: "
@@ -2426,21 +2576,22 @@ def dom_update_meta_attr(dom_element, attributes):
             meta_attributes.getAttribute("id") + "-"
         )
 
-def get_utilization(element):
+def get_utilization(element, filter_name=None):
     utilization = {}
     for e in element.getElementsByTagName("utilization"):
         for u in e.getElementsByTagName("nvpair"):
             name = u.getAttribute("name")
-            value = u.getAttribute("value") if u.hasAttribute("value") else ""
-            utilization[name] = value
+            if filter_name is not None and name != filter_name:
+                continue
+            utilization[name] = u.getAttribute("value")
         # Use just first element of utilization attributes. We don't support
         # utilization with rules just yet.
         break
     return utilization
 
-def get_utilization_str(element):
+def get_utilization_str(element, filter_name=None):
     output = []
-    for name, value in sorted(get_utilization(element).items()):
+    for name, value in sorted(get_utilization(element, filter_name).items()):
         output.append(name + "=" + value)
     return " ".join(output)
 
@@ -2518,33 +2669,44 @@ def get_cluster_properties_definition():
     ]
     definition = {}
     for source in sources:
-        output, retval = run([source["path"], "metadata"])
+        stdout, stderr, retval = cmd_runner().run([source["path"], "metadata"])
         if retval != 0:
-            err("unable to run {0}\n".format(source["name"]) + output)
-        etree = ET.fromstring(output)
-        for e in etree.findall("./parameters/parameter"):
-            prop = get_cluster_property_from_xml(e)
-            if prop["name"] not in banned_props:
-                prop["source"] = source["name"]
-                prop["advanced"] = prop["name"] not in basic_props
-                if prop["name"] in readable_names:
-                    prop["readable_name"] = readable_names[prop["name"]]
-                else:
-                    prop["readable_name"] = prop["name"]
-                definition[prop["name"]] = prop
+            err("unable to run {0}\n{1}".format(source["name"], stderr))
+        try:
+            etree = ET.fromstring(stdout)
+            for e in etree.findall("./parameters/parameter"):
+                prop = get_cluster_property_from_xml(e)
+                if prop["name"] not in banned_props:
+                    prop["source"] = source["name"]
+                    prop["advanced"] = prop["name"] not in basic_props
+                    if prop["name"] in readable_names:
+                        prop["readable_name"] = readable_names[prop["name"]]
+                    else:
+                        prop["readable_name"] = prop["name"]
+                    definition[prop["name"]] = prop
+        except xml.parsers.expat.ExpatError as e:
+            err("unable to parse {0} metadata definition: {1}".format(
+                source["name"],
+                e
+            ))
+        except ET.ParseError as e:
+            err("unable to parse {0} metadata definition: {1}".format(
+                source["name"],
+                e
+            ))
     return definition
 
 
 def get_cluster_property_from_xml(etree_el):
     property = {
-        "name": etree_el.get("name"),
-        "shortdesc": etree_el.find("shortdesc").text,
-        "longdesc": etree_el.find("longdesc").text
+        "name": etree_el.get("name", ""),
+        "shortdesc": "",
+        "longdesc": "",
     }
-    if property["shortdesc"] is None:
-        property["shortdesc"] = ""
-    if property["longdesc"] is None:
-        property["longdesc"] = ""
+    for item in ["shortdesc", "longdesc"]:
+        item_el = etree_el.find(item)
+        if item_el is not None and item_el.text is not None:
+            property[item] = item_el.text
 
     content = etree_el.find("content")
     if content is None:
@@ -2639,6 +2801,11 @@ def get_middleware_factory():
         cib=middleware.cib(usefile, get_cib, replace_cib_configuration),
         corosync_conf_existing=middleware.corosync_conf_existing(
             pcs_options.get("--corosync_conf", None)
+        ),
+        booth_conf=pcs.cli.booth.env.middleware_config(
+            pcs_options.get("--name", DEFAULT_BOOTH_NAME),
+            pcs_options.get("--booth-conf", None),
+            pcs_options.get("--booth-key", None),
         )
     )
 
@@ -2674,3 +2841,16 @@ def exit_on_cmdline_input_errror(error, main_name, usage_name):
 
 def get_report_processor():
     return LibraryReportProcessorToConsole(debug=("--debug" in pcs_options))
+
+def get_set_properties(prop_name=None, defaults=None):
+    properties = {} if defaults is None else dict(defaults)
+    (output, retVal) = run(["cibadmin","-Q","--scope", "crm_config"])
+    if retVal != 0:
+        err("unable to get crm_config\n"+output)
+    dom = parseString(output)
+    de = dom.documentElement
+    crm_config_properties = de.getElementsByTagName("nvpair")
+    for prop in crm_config_properties:
+        if prop_name is None or (prop_name == prop.getAttribute("name")):
+            properties[prop.getAttribute("name")] = prop.getAttribute("value")
+    return properties
diff --git a/pcsd/Gemfile b/pcsd/Gemfile
index e851eaf..ded32ae 100644
--- a/pcsd/Gemfile
+++ b/pcsd/Gemfile
@@ -9,7 +9,6 @@ gem 'sinatra-contrib'
 gem 'rack'
 gem 'rack-protection'
 gem 'tilt'
-gem 'eventmachine'
 gem 'rack-test'
 gem 'backports'
 gem 'rpam-ruby19', :platform => [:ruby_19, :ruby_20, :ruby_21, :ruby_22]
diff --git a/pcsd/Gemfile.lock b/pcsd/Gemfile.lock
index eff055a..e56c76a 100644
--- a/pcsd/Gemfile.lock
+++ b/pcsd/Gemfile.lock
@@ -3,7 +3,6 @@ GEM
   remote: https://tojeline.fedorapeople.org/rubygems/
   specs:
     backports (3.6.8)
-    eventmachine (1.2.0.1)
     json (1.8.3)
     multi_json (1.12.0)
     open4 (1.3.4)
@@ -32,7 +31,6 @@ PLATFORMS
 
 DEPENDENCIES
   backports
-  eventmachine
   json
   multi_json
   open4
diff --git a/pcsd/Makefile b/pcsd/Makefile
index 798a8bd..9a4a4ba 100644
--- a/pcsd/Makefile
+++ b/pcsd/Makefile
@@ -7,7 +7,6 @@ build_gems_rhel6:
 	mkdir -p vendor/bundle/ruby
 	gem install --verbose --no-rdoc --no-ri -l -i vendor/bundle/ruby \
 	vendor/cache/backports-3.6.8.gem \
-	vendor/cache/eventmachine-1.2.0.1.gem \
 	vendor/cache/json-1.8.3.gem \
 	vendor/cache/multi_json-1.12.1.gem \
 	vendor/cache/open4-1.3.4.gem \
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index 276880c..193431b 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -43,7 +43,7 @@ def get_pcs_path(pcsd_path)
   end
 end
 
-PCS_VERSION = '0.9.153'
+PCS_VERSION = '0.9.154'
 COROSYNC = COROSYNC_BINARIES + "corosync"
 ISRHEL6 = is_rhel6
 ISSYSTEMCTL = is_systemctl
diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
index f54cd30..b8f363a 100644
--- a/pcsd/cluster_entity.rb
+++ b/pcsd/cluster_entity.rb
@@ -120,7 +120,7 @@ module ClusterEntity
       status = ClusterEntity::CRMResourceStatus.new
       status.id = primitive.id
       status.resource_agent = primitive.agentname
-      status.managed = false
+      status.managed = true
       status.failed = resource[:failed]
       status.role = nil
       status.active = resource[:active]
@@ -332,7 +332,7 @@ module ClusterEntity
       :unknown => {
         :val => 6,
         :str => 'unknown'
-      }
+      },
     }
 
     def initialize(status=:unknown)
@@ -1011,7 +1011,9 @@ module ClusterEntity
       @uptime = 'unknown'
       @name = nil
       @services = {}
-      [:pacemaker, :corosync, :pcsd, :cman, :sbd].each do |service|
+      [
+        :pacemaker, :pacemaker_remote, :corosync, :pcsd, :cman, :sbd
+      ].each do |service|
         @services[service] = {
           :installed => nil,
           :running => nil,
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 7c25e10..ddb7322 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -8,6 +8,7 @@ require 'net/https'
 require 'json'
 require 'fileutils'
 require 'backports'
+require 'base64'
 
 require 'config.rb'
 require 'cfgsync.rb'
@@ -19,6 +20,9 @@ require 'auth.rb'
 class NotImplementedException < NotImplementedError
 end
 
+class InvalidFileNameException < NameError
+end
+
 def getAllSettings(auth_user, cib_dom=nil)
   unless cib_dom
     cib_dom = get_cib_dom(auth_user)
@@ -131,6 +135,19 @@ def add_order_set_constraint(
   return retval, stderr.join(' ')
 end
 
+def add_colocation_set_constraint(
+  auth_user, resource_set_list, force=false, autocorrect=true
+)
+  command = [PCS, "constraint", "colocation"]
+  resource_set_list.each { |resource_set|
+    command << "set"
+    command.concat(resource_set)
+  }
+  command << '--force' if force
+  command << '--autocorrect' if autocorrect
+  stdout, stderr, retval = run_cmd(auth_user, *command)
+  return retval, stderr.join(' ')
+end
 
 def add_ticket_constraint(
     auth_user, ticket, resource_id, role, loss_policy,
@@ -382,47 +399,47 @@ end
 
 def send_request(auth_user, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, cookies_data=nil)
   cookies_data = {} if not cookies_data
-  begin
-    request = "/#{request}" if not request.start_with?("/")
+  request = "/#{request}" if not request.start_with?("/")
 
-    # fix ipv6 address for URI.parse
-    node6 = node
-    if (node.include?(":") and ! node.start_with?("["))
-      node6 = "[#{node}]"
-    end
+  # fix ipv6 address for URI.parse
+  node6 = node
+  if (node.include?(":") and ! node.start_with?("["))
+    node6 = "[#{node}]"
+  end
 
-    if remote
-      uri = URI.parse("https://#{node6}:2224/remote" + request)
-    else
-      uri = URI.parse("https://#{node6}:2224" + request)
-    end
+  if remote
+    uri = URI.parse("https://#{node6}:2224/remote" + request)
+  else
+    uri = URI.parse("https://#{node6}:2224" + request)
+  end
 
-    if post
-      req = Net::HTTP::Post.new(uri.path)
-      raw_data ? req.body = raw_data : req.set_form_data(data)
-    else
-      req = Net::HTTP::Get.new(uri.path)
-      req.set_form_data(data)
-    end
+  if post
+    req = Net::HTTP::Post.new(uri.path)
+    raw_data ? req.body = raw_data : req.set_form_data(data)
+  else
+    req = Net::HTTP::Get.new(uri.path)
+    req.set_form_data(data)
+  end
 
-    cookies_to_send = []
-    cookies_data_default = {}
-    # Let's be safe about characters in cookie variables and do base64.
-    # We cannot do it for CIB_user however to be backward compatible
-    # so we at least remove disallowed characters.
-    cookies_data_default['CIB_user'] = PCSAuth.cookieUserSafe(
-      auth_user[:username].to_s
-    )
-    cookies_data_default['CIB_user_groups'] = PCSAuth.cookieUserEncode(
-      (auth_user[:usergroups] || []).join(' ')
-    )
+  cookies_to_send = []
+  cookies_data_default = {}
+  # Let's be safe about characters in cookie variables and do base64.
+  # We cannot do it for CIB_user however to be backward compatible
+  # so we at least remove disallowed characters.
+  cookies_data_default['CIB_user'] = PCSAuth.cookieUserSafe(
+    auth_user[:username].to_s
+  )
+  cookies_data_default['CIB_user_groups'] = PCSAuth.cookieUserEncode(
+    (auth_user[:usergroups] || []).join(' ')
+  )
 
-    cookies_data_default.update(cookies_data)
-    cookies_data_default.each { |name, value|
-      cookies_to_send << CGI::Cookie.new('name' => name, 'value' => value).to_s
-    }
-    req.add_field('Cookie', cookies_to_send.join(';'))
+  cookies_data_default.update(cookies_data)
+  cookies_data_default.each { |name, value|
+    cookies_to_send << CGI::Cookie.new('name' => name, 'value' => value).to_s
+  }
+  req.add_field('Cookie', cookies_to_send.join(';'))
 
+  begin
     # uri.host returns "[addr]" for ipv6 addresses, which is wrong
     # uri.hostname returns "addr" for ipv6 addresses, which is correct, but it
     #   is not available in older ruby versions
@@ -879,6 +896,10 @@ def pacemaker_running?()
   is_service_running?('pacemaker')
 end
 
+def pacemaker_remote_running?()
+  is_service_running?('pacemaker_remote')
+end
+
 def get_pacemaker_version()
   begin
     stdout, stderror, retval = run_cmd(
@@ -928,9 +949,9 @@ def pcsd_restart()
   fork {
     sleep(10)
     if ISSYSTEMCTL
-      `systemctl restart pcsd`
+      exec("systemctl", "restart", "pcsd")
     else
-      `service pcsd restart`
+      exec("service", "pcsd", "restart")
     end
   }
 end
@@ -1340,10 +1361,10 @@ def pcsd_restart_nodes(auth_user, nodes)
   }
 end
 
-def write_file_lock(path, perm, data)
+def write_file_lock(path, perm, data, binary=false)
+  file = nil
   begin
-    file = nil
-    file = File.open(path, 'w', perm)
+    file = File.open(path, binary ? 'wb' : 'w', perm)
     file.flock(File::LOCK_EX)
     file.write(data)
   rescue => e
@@ -1357,6 +1378,23 @@ def write_file_lock(path, perm, data)
   end
 end
 
+def read_file_lock(path, binary=false)
+  file = nil
+  begin
+    file = File.open(path, binary ? 'rb' : 'r')
+    file.flock(File::LOCK_SH)
+    return file.read()
+  rescue => e
+    $logger.error("Cannot read file '#{path}': #{e.message}")
+    raise
+  ensure
+    unless file.nil?
+      file.flock(File::LOCK_UN)
+      file.close()
+    end
+  end
+end
+
 def verify_cert_key_pair(cert, key)
   errors = []
   cert_modulus = nil
@@ -1681,7 +1719,13 @@ def get_node_status(auth_user, cib_dom)
       :node_attr => node_attrs_to_v2(get_node_attributes(auth_user, cib_dom)),
       :nodes_utilization => get_nodes_utilization(cib_dom),
       :known_nodes => [],
-      :available_features => ['sbd', 'ticket_constraints']
+      :available_features => [
+        'constraint_colocation_set',
+        'sbd',
+        'ticket_constraints',
+        'moving_resource_in_group',
+        'unmanaged_resource',
+      ]
   }
 
   nodes = get_nodes_status()
@@ -1820,7 +1864,7 @@ end
 def status_v1_to_v2(status)
   new_status = status.select { |k,_|
     [:cluster_name, :username, :is_cman_with_udpu_transport,
-     :need_ring1_address, :cluster_settings, :constraints, :groups,
+     :need_ring1_address, :cluster_settings, :constraints,
      :corosync_online, :corosync_offline, :pacemaker_online, :pacemaker_standby,
      :pacemaker_offline, :acls, :fence_levels
     ].include?(k)
@@ -1841,6 +1885,8 @@ def status_v1_to_v2(status)
     ].include?(k)
   }
 
+  new_status[:groups] = get_group_list_from_tree_of_resources(resources)
+
   new_status[:node].update(
     {
       :id => status[:node_id],
@@ -1857,6 +1903,22 @@ def status_v1_to_v2(status)
   return new_status
 end
 
+def get_group_list_from_tree_of_resources(tree)
+  group_list = []
+  tree.each { |resource|
+    if resource.instance_of?(ClusterEntity::Group)
+      group_list << resource.id
+    end
+    if (
+      resource.kind_of?(ClusterEntity::MultiInstance) and
+      resource.member.instance_of?(ClusterEntity::Group)
+    )
+      group_list << resource.member.id
+    end
+  }
+  return group_list
+end
+
 def allowed_for_local_cluster(auth_user, action)
   pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
   return pcs_config.permissions_local.allows?(
@@ -1951,14 +2013,14 @@ def enable_service(service)
 end
 
 def disable_service(service)
+  # fails when the service is not installed, so we need to check it beforehand
+  if not is_service_installed?(service)
+    return true
+  end
+
   if ISSYSTEMCTL
-    # returns success even if the service is not installed
     cmd = ['systemctl', 'disable', "#{service}.service"]
   else
-    if not is_service_installed?(service)
-      return true
-    end
-    # fails when the service is not installed, so we need to check it beforehand
     cmd = ['chkconfig', service, 'off']
   end
   _, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), *cmd)
@@ -2005,3 +2067,52 @@ def get_parsed_local_sbd_config()
     return nil
   end
 end
+
+def write_booth_config(config, data)
+  if config.include?('/')
+    raise InvalidFileNameException.new(config)
+  end
+  write_file_lock(File.join(BOOTH_CONFIG_DIR, config), nil, data)
+end
+
+def read_booth_config(config)
+  if config.include?('/')
+    raise InvalidFileNameException.new(config)
+  end
+  config_path = File.join(BOOTH_CONFIG_DIR, config)
+  unless File.file?(config_path)
+    return nil
+  end
+  return read_file_lock(config_path)
+end
+
+def write_booth_authfile(filename, data)
+  if filename.include?('/')
+    raise InvalidFileNameException.new(filename)
+  end
+  write_file_lock(
+    File.join(BOOTH_CONFIG_DIR, filename), 0600, Base64.decode64(data), true
+  )
+end
+
+def read_booth_authfile(filename)
+  if filename.include?('/')
+    raise InvalidFileNameException.new(filename)
+  end
+  return Base64.strict_encode64(
+    read_file_lock(File.join(BOOTH_CONFIG_DIR, filename), true)
+  )
+end
+
+def get_authfile_from_booth_config(config_data)
+  authfile_path = nil
+  config_data.split("\n").each {|line|
+    if line.include?('=')
+      parts = line.split('=', 2)
+      if parts[0].strip == 'authfile'
+        authfile_path = parts[1].strip
+      end
+    end
+  }
+  return authfile_path
+end
diff --git a/pcsd/pcsd b/pcsd/pcsd
index 6b3b04f..30f0979 100755
--- a/pcsd/pcsd
+++ b/pcsd/pcsd
@@ -42,6 +42,8 @@ start() {
         export GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby
         $exec $params > /dev/null 2>&1 &
         echo $! > $pidfile
+        # give it time to fully start or fail
+        sleep 2
         if status $prog > /dev/null 2>&1; then
             touch $lockfile
             success
diff --git a/pcsd/pcsd.debian b/pcsd/pcsd.debian
index 2ed3169..871911b 100755
--- a/pcsd/pcsd.debian
+++ b/pcsd/pcsd.debian
@@ -36,7 +36,9 @@ SLEEP_DURATION=2
 [ -x $(which $SUB_EXEC) ] || echo "$SUB_EXEC not found. Is pcs installed?"
 
 # Read configuration variable file if it is present
+set -a
 [ -r /etc/default/$NAME ] && . /etc/default/$NAME
+set +a
 
 # Source lsb init functions
 . /lib/lsb/init-functions
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index d3032cf..dcfd5a0 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -75,6 +75,7 @@ if development?
 end
 
 before do
+  # nobody is logged in yet
   @auth_user = nil
 
   # get session storage instance from env
@@ -83,8 +84,21 @@ before do
     $session_storage_env = env
   end
 
-  if request.path != '/login' and not request.path == "/logout" and not request.path == '/remote/auth' and not request.path == '/login-status'
-    protected! 
+  # URLs which are accessible for everybody including not logged in users
+  always_accessible = [
+    '/login',
+    '/logout',
+    '/login-status',
+    '/remote/auth',
+  ]
+  if not always_accessible.include?(request.path)
+    # Sets @auth_user to a hash containing info about logged in user or halts
+    # the request processing if login credentials are incorrect.
+    protected!
+  else
+    # Set a sane default: nobody is logged in, but we do not need to check both
+    # for nil and empty username (if auth_user and auth_user[:username])
+    @auth_user = {} if not @auth_user
   end
   $cluster_name = get_cluster_name()
 end
@@ -894,7 +908,7 @@ already been added to pcsd.  You may not add two clusters with the same name int
             'type' => 'boolean',
             'shortdesc' => 'Should deleted actions be cancelled',
             'longdesc' => 'Should deleted actions be cancelled',
-            'readable_name' => 'top Orphan Actions',
+            'readable_name' => 'Stop Orphan Actions',
             'advanced' => false
           },
           'start-failure-is-fatal' => {
@@ -1201,33 +1215,168 @@ already been added to pcsd.  You may not add two clusters with the same name int
     return [200, "Node added successfully."]
   end
 
+  def pcs_0_9_142_resource_change_group(auth_user, params)
+    parameters = {
+      :resource_id => params[:resource_id],
+      :resource_group => '',
+      :_orig_resource_group => '',
+    }
+    parameters[:resource_group] = params[:group_id] if params[:group_id]
+    if params[:old_group_id]
+      parameters[:_orig_resource_group] = params[:old_group_id]
+    end
+    return send_cluster_request_with_token(
+      auth_user, params[:cluster], 'update_resource', true, parameters
+    )
+  end
+
+  def pcs_0_9_142_resource_clone(auth_user, params)
+    parameters = {
+      :resource_id => params[:resource_id],
+      :resource_clone => true,
+      :_orig_resource_clone => 'false',
+    }
+    return send_cluster_request_with_token(
+      auth_user, params[:cluster], 'update_resource', true, parameters
+    )
+  end
+
+  def pcs_0_9_142_resource_unclone(auth_user, params)
+    parameters = {
+      :resource_id => params[:resource_id],
+      :resource_clone => nil,
+      :_orig_resource_clone => 'true',
+    }
+    return send_cluster_request_with_token(
+      auth_user, params[:cluster], 'update_resource', true, parameters
+    )
+  end
+
+  def pcs_0_9_142_resource_master(auth_user, params)
+    parameters = {
+      :resource_id => params[:resource_id],
+      :resource_ms => true,
+      :_orig_resource_ms => 'false',
+    }
+    return send_cluster_request_with_token(
+      auth_user, params[:cluster], 'update_resource', true, parameters
+    )
+  end
+
+  # There is a bug in pcs-0.9.138 and older in processing the standby and
+  # unstandby request. JS of that pcsd always sent nodename in "node"
+  # parameter, which caused pcsd daemon to run the standby command locally with
+  # param["node"] as node name. This worked fine if the local cluster was
+  # managed from JS, as pacemaker simply put the requested node into standby.
+  # However it didn't work for managing non-local clusters, as the command was
+  # run on the local cluster every time. Pcsd daemon would send the request to a
+  # remote cluster if the param["name"] variable was set, and that never
+  # happened. That however wouldn't work either, as then the required parameter
+  # "node" wasn't sent in the request causing an exception on the receiving
+  # node. This is fixed in commit 053f63ca109d9ef9e7f0416e90aab8e140480f5b
+  #
+  # In order to be able to put nodes running pcs-0.9.138 into standby, the
+  # nodename must be sent in "node" param, and the "name" must not be sent.
+  def pcs_0_9_138_node_standby(auth_user, params)
+    translated_params = {
+      'node' => params[:name],
+    }
+    return send_cluster_request_with_token(
+      auth_user, params[:cluster], 'node_standby', true, translated_params
+    )
+  end
+
+  def pcs_0_9_138_node_unstandby(auth_user, params)
+    translated_params = {
+      'node' => params[:name],
+    }
+    return send_cluster_request_with_token(
+      auth_user, params[:cluster], 'node_unstandby', true, translated_params
+    )
+  end
+
   post '/managec/:cluster/?*' do
     auth_user = PCSAuth.sessionToAuthUser(session)
     raw_data = request.env["rack.input"].read
     if params[:cluster]
       request = "/" + params[:splat].join("/")
-      code, out = send_cluster_request_with_token(
-        auth_user, params[:cluster], request, true, params, true, raw_data
-      )
 
       # backward compatibility layer BEGIN
-      # This code correctly remove constraints on pcs/pcsd version 0.9.137 and older
-      redirection = {
-          "/remove_constraint_remote" => "/resource_cmd/rm_constraint",
-          "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule"
+      translate_for_version = {
+        '/node_standby' => [
+          [[0, 9, 138], method(:pcs_0_9_138_node_standby)],
+        ],
+        '/node_unstandby' => [
+          [[0, 9, 138], method(:pcs_0_9_138_node_unstandby)],
+        ],
       }
-      if code == 404 and redirection.key?(request)
+      if translate_for_version.key?(request)
+        target_pcsd_version = [0, 0, 0]
+        version_code, version_out = send_cluster_request_with_token(
+          auth_user, params[:cluster], 'get_sw_versions'
+        )
+        if version_code == 200
+          begin
+            versions = JSON.parse(version_out)
+            target_pcsd_version = versions['pcs'] if versions['pcs']
+          rescue JSON::ParserError
+          end
+        end
+        translate_function = nil
+        translate_for_version[request].each { |pair|
+          if (target_pcsd_version <=> pair[0]) != 1 # target <= pair
+            translate_function = pair[1]
+            break
+          end
+        }
+      end
+      # backward compatibility layer END
+
+      if translate_function
+        code, out = translate_function.call(auth_user, params)
+      else
         code, out = send_cluster_request_with_token(
-          auth_user,
-          params[:cluster],
-          redirection[request],
-          true,
-          params,
-          false,
-          raw_data
+          auth_user, params[:cluster], request, true, params, true, raw_data
         )
       end
-      # bcl END
+
+      # backward compatibility layer BEGIN
+      if code == 404
+        case request
+          # supported since pcs-0.9.143 (tree view of resources)
+          when '/resource_change_group'
+            code, out =  pcs_0_9_142_resource_change_group(auth_user, params)
+          # supported since pcs-0.9.143 (tree view of resources)
+          when '/resource_clone'
+            code, out = pcs_0_9_142_resource_clone(auth_user, params)
+          # supported since pcs-0.9.143 (tree view of resources)
+          when '/resource_unclone'
+            code, out = pcs_0_9_142_resource_unclone(auth_user, params)
+          # supported since pcs-0.9.143 (tree view of resources)
+          when '/resource_master'
+            code, out = pcs_0_9_142_resource_master(auth_user, params)
+          else
+            redirection = {
+              # constraints removal for pcs-0.9.137 and older
+              "/remove_constraint_remote" => "/resource_cmd/rm_constraint",
+              # constraints removal for pcs-0.9.137 and older
+              "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule"
+            }
+            if redirection.key?(request)
+              code, out = send_cluster_request_with_token(
+                auth_user,
+                params[:cluster],
+                redirection[request],
+                true,
+                params,
+                false,
+                raw_data
+              )
+            end
+        end
+      end
+      # backward compatibility layer END
+
       return code, out
     end
   end
diff --git a/pcsd/pcsd.service b/pcsd/pcsd.service
index e506f1b..20bc9ab 100644
--- a/pcsd/pcsd.service
+++ b/pcsd/pcsd.service
@@ -5,6 +5,7 @@ Description=PCS GUI and remote configuration interface
 EnvironmentFile=/etc/sysconfig/pcsd
 Environment=GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby
 ExecStart=/usr/lib/pcsd/pcsd > /dev/null &
+Type=notify
 
 [Install]
 WantedBy=multi-user.target
diff --git a/pcsd/pcsd.service-runner b/pcsd/pcsd.service-runner
index 1949a68..883d290 100644
--- a/pcsd/pcsd.service-runner
+++ b/pcsd/pcsd.service-runner
@@ -2,12 +2,23 @@
 # this file is a pcsd runner callable from a systemd unit
 # it also serves as a holder of a selinux context
 
-# add pcsd to the load path (ruby -I)
-libdir = File.dirname(__FILE__)
-$LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir)
+begin
+  # add pcsd to the load path (ruby -I)
+  libdir = File.dirname(__FILE__)
+  $LOAD_PATH.unshift(libdir) unless $LOAD_PATH.include?(libdir)
 
-# change current directory (ruby -C)
-Dir.chdir('/var/lib/pcsd')
+  # change current directory (ruby -C)
+  Dir.chdir('/var/lib/pcsd')
 
-# import and run pcsd
-require 'ssl'
+  # import and run pcsd
+  require 'ssl'
+rescue SignalException => e
+  if [Signal.list['INT'], Signal.list['TERM']].include?(e.signo)
+    # gracefully exit on SIGINT and SIGTERM
+    # pcsd sets up signal handlers later, this catches exceptions which occur
+    # by receiving signals before the handlers have been set up.
+    exit
+  else
+    raise
+  end
+end
diff --git a/pcsd/public/css/style.css b/pcsd/public/css/style.css
index d41b164..0d744d5 100644
--- a/pcsd/public/css/style.css
+++ b/pcsd/public/css/style.css
@@ -848,3 +848,13 @@ table.args-table td.reg {
 .constraint-ticket-add-attribute {
   vertical-align: top;
 }
+
+.cursor-move {
+  cursor: move;
+}
+
+.sortable-table td {
+  height: 1.5em;
+  line-height: 1.2em;
+  background: black;
+}
diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
index bf1bb92..6ef49e2 100644
--- a/pcsd/public/js/nodes-ember.js
+++ b/pcsd/public/js/nodes-ember.js
@@ -47,6 +47,19 @@ Pcs = Ember.Application.createWithMixins({
       this.get("available_features").indexOf("ticket_constraints") != -1
     );
   }.property("available_features"),
+  is_supported_constraint_colocation_set: function() {
+    return (
+      this.get("available_features").indexOf("constraint_colocation_set") != -1
+    );
+  }.property("available_features"),
+  is_supported_moving_resource_in_group: function() {
+    return (
+      this.get("available_features").indexOf("moving_resource_in_group") != -1
+    );
+  }.property("available_features"),
+  is_supported_unmanaged_resource: function() {
+    return (this.get("available_features").indexOf("unmanaged_resource") != -1);
+  }.property("available_features"),
   is_sbd_running: false,
   is_sbd_enabled: false,
   is_sbd_enabled_or_running: function() {
@@ -205,20 +218,14 @@ Pcs = Ember.Application.createWithMixins({
           Ember.run.scheduleOnce('afterRender', Pcs, function () {
             if (self.get('cur_fence')) {
               if (fence_change) {
-                if (first_run) {
-                  update_instance_attributes(self.get('cur_fence').get('id'));
-                }
-                tree_view_onclick(self.get('cur_fence').get('id'), true);
+                tree_view_onclick(self.get('cur_fence').get('id'));
               } else {
                 tree_view_select(self.get('cur_fence').get('id'));
               }
             }
             if (self.get('cur_resource')) {
               if (resource_change) {
-                if (first_run) {
-                  update_instance_attributes(self.get('cur_resource').get('id'));
-                }
-                tree_view_onclick(self.get('cur_resource').get('id'), true);
+                tree_view_onclick(self.get('cur_resource').get('id'));
               } else {
                 tree_view_select(self.get('cur_resource').get('id'));
               }
@@ -246,6 +253,154 @@ Pcs = Ember.Application.createWithMixins({
   }
 });
 
+Pcs.GroupSelectorComponent = Ember.Component.extend({
+  resource_id: null,
+  resource: function() {
+    var id = this.get("resource_id");
+    if (id) {
+      var resource = Pcs.resourcesContainer.get_resource_by_id(id);
+      if (resource) {
+        return resource;
+      }
+    }
+    return null;
+  }.property("resource_id"),
+  resource_change: function() {
+    this._refresh_fn();
+    this._update_resource_select_content();
+    this._update_resource_select_value();
+  }.observes("resource", "resource_id"),
+  group_list: [],
+  group_select_content: function() {
+    var list = [];
+    $.each(this.get("group_list"), function(_, group) {
+      list.push({
+        name: group,
+        value: group
+      });
+    });
+    return list;
+  }.property("group_list"),
+  group_select_value: null,
+  group: function() {
+    var id = this.get("group_select_value");
+    if (id) {
+      var group = Pcs.resourcesContainer.get_resource_by_id(id);
+      if (group) {
+        return group;
+      }
+    }
+    return null;
+  }.property("group_select_value"),
+  position_select_content: [
+    {
+      name: "before",
+      value: "before"
+    },
+    {
+      name: "after",
+      value: "after"
+    }
+  ],
+  position_select_value: null,
+  position_select_value_changed: function() {
+  }.observes("position_select_value"),
+  resource_select_content: [],
+  resource_select_value: null,
+  group_select_value_changed: function () {
+    this._update_resource_select_content();
+    this._update_resource_select_value();
+  }.observes("group_select_value"),
+  actions: {
+    refresh: function() {
+      this.set("group_list", Pcs.resourcesContainer.get("group_list"));
+      this._refresh_fn();
+      this._update_resource_select_content();
+      this._update_resource_select_value();
+    }
+  },
+  _refresh_fn: function() {
+    var id = this.get("resource_id");
+    if (id) {
+      var resource = Pcs.resourcesContainer.get_resource_by_id(id);
+      if (resource) {
+        var parent = resource.get("parent");
+        if (parent && parent.get("is_group")) {
+          this.set("group_select_value", parent.get("id"));
+          return;
+        }
+      }
+    }
+    this.set("group_select_value", null);
+  },
+  _update_resource_select_content: function() {
+    var self = this;
+    var group = self.get("group");
+    if (!group) {
+      self.set("resource_select_content", []);
+      return;
+    }
+    var list = [];
+    var resource_id;
+    $.each(group.get("members"), function(_, resource) {
+      resource_id = resource.get("id");
+      if (resource_id != self.get("resource_id")) {
+        list.push({
+          name: resource_id,
+          value: resource_id
+        });
+      }
+    });
+    self.set("resource_select_content", list);
+  },
+  _update_resource_select_value: function() {
+    var self = this;
+    var group = self.get("group");
+    var resource = self.get("resource");
+    if (!group) {
+      self.set("resource_select_value", null);
+      return;
+    }
+    var resource_list = group.get("members");
+    if (
+      !resource ||
+      !resource.get("parent") ||
+      resource.get("parent").get("id") != group.get("id")
+    ) {
+      self.set("position_select_value", "after");
+      self.set("resource_select_value", resource_list.slice(-1)[0].get("id"));
+    } else {
+      var index = resource_list.findIndex(function(item) {
+        return item.get("id") == resource.get("id");
+      });
+      if (index == 0) {
+        self.set("position_select_value", "before");
+        self.set(
+          "resource_select_value",
+          (resource_list[1]) ? resource_list[1].get("id") : null // second
+        );
+      } else if (index == -1) {
+        self.set("position_select_value", "after");
+        self.set("resource_select_value", resource_list.slice(-1)[0].get("id"));
+      } else {
+        self.set("position_select_value", "after");
+        self.set("resource_select_value", resource_list[index-1].get("id"));
+      }
+    }
+  },
+  group_input_name: "group_id",
+  classNames: "group-selector",
+  init: function() {
+    this._super();
+    if (this.get("resource_id")) {
+      this.set("group_list", Pcs.resourcesContainer.get("group_list"));
+    }
+    this._refresh_fn();
+    this._update_resource_select_content();
+    this._update_resource_select_value();
+  }
+});
+
 Pcs.ValueSelectorComponent = Ember.Component.extend({
   tagName: 'select',
   attributeBindings: ['name'],
@@ -345,9 +500,9 @@ Pcs.UtilizationTableComponent = Ember.Component.extend({
     },
     add: function(form_id) {
       var id = "#" + form_id;
-      var name = $(id + " input[name='new_utilization_name']").val();
+      var name = $(id + " input[name='new_utilization_name']").val().trim();
       if (name == "") {
-        return;
+        alert("Name of utilization attribute should be non-empty string.");
       }
       var value = $(id + " input[name='new_utilization_value']").val().trim();
       if (!is_integer(value)) {
@@ -668,6 +823,11 @@ Pcs.ResourceObj = Ember.Object.extend({
   id: null,
   _id: Ember.computed.alias('id'),
   name: Ember.computed.alias('id'),
+  treeview_element_id: function() {
+    if (this.get("id")) {
+      return this.get("id") + "-treeview-element";
+    }
+  }.property("id"),
   parent: null,
   meta_attr: [],
   meta_attributes: Ember.computed.alias('meta_attr'),
@@ -683,20 +843,6 @@ Pcs.ResourceObj = Ember.Object.extend({
     }
     return null;
   }.property('parent'),
-  group_selector: function() {
-    var self = this;
-    var cur_group = self.get('get_group_id');
-    var html = '<select>\n<option value="">None</option>\n';
-    $.each(self.get('group_list'), function(_, group) {
-      html += '<option value="' + group + '"';
-      if (cur_group === group) {
-        html += 'selected';
-      }
-      html += '>' + group + '</option>\n';
-    });
-    html += '</select><input type="button" value="Change group" onclick="resource_change_group(curResource(), $(this).prev().prop(\'value\'));">';
-    return html;
-  }.property('group_list', 'get_group_id'),
   status: "unknown",
   class_type: null, // property to determine type of the resource
   resource_type: function() { // this property is just for displaying resource type in GUI
@@ -705,7 +851,9 @@ Pcs.ResourceObj = Ember.Object.extend({
   }.property("class_type"),
   res_type: Ember.computed.alias('resource_type'),
   status_icon: function() {
-    var icon_class = get_status_icon_class(this.get("status_val"));
+    var icon_class = get_status_icon_class(
+      this.get("status_val"), this.get("is_unmanaged")
+    );
     return "<div style=\"float:left;margin-right:6px;height:16px;\" class=\"" + icon_class + " sprites\"></div>";
   }.property("status_val"),
   status_val: function() {
@@ -721,19 +869,31 @@ Pcs.ResourceObj = Ember.Object.extend({
     }
   }.property('status', 'error_list.@each.message', 'warning_list.@each.message'),
   status_color: function() {
-    return get_status_color(this.get("status_val"));
+    return get_status_color(this.get("status_val"), this.get("is_unmanaged"));
   }.property("status_val"),
   status_style: function() {
-    var color = get_status_color(this.get("status_val"));
+    var color = get_status_color(
+      this.get("status_val"), this.get("is_unmanaged")
+    );
     return "color: " + color + ((color != "green")? "; font-weight: bold;" : "");
   }.property("status_val"),
   show_status: function() {
-    return '<span style="' + this.get('status_style') + '">' + this.get('status') + '</span>';
+    return '<span style="' + this.get('status_style') + '">'
+      + this.get('status') + (this.get("is_unmanaged") ? " (unmanaged)" : "")
+      + '</span>';
   }.property("status_style", "disabled"),
   status_class: function() {
-    var show = ((Pcs.clusterController.get("show_all_resources"))? "" : "hidden ");
-    return ((this.get("status_val") == get_status_value("ok") || this.status == "disabled") ? show + "default-hidden" : "");
-  }.property("status_val"),
+    if (
+      this.get("status_val") == get_status_value("ok") ||
+      this.get("status") == "disabled"
+    ) {
+      return (
+        Pcs.clusterController.get("show_all_resources") ? "" : "hidden "
+        ) + "default-hidden";
+    } else {
+      return "";
+    }
+  }.property("status_val", "status"),
   status_class_fence: function() {
     var show = ((Pcs.clusterController.get("show_all_fence"))? "" : "hidden ");
     return ((this.get("status_val") == get_status_value("ok")) ? show + "default-hidden" : "");
@@ -762,11 +922,21 @@ Pcs.ResourceObj = Ember.Object.extend({
         return "";
     }
   }.property("status_val"),
+  show_group_selector: function() {
+    var parent = this.get("parent");
+    return !(
+      parent &&
+      parent.is_group &&
+      parent.get("parent") &&
+      Pcs.resourcesContainer.get("is_version_1")
+    );
+  }.property(),
 
   location_constraints: [],
   ordering_constraints: [],
   ordering_set_constraints: [],
   colocation_constraints: [],
+  colocation_set_constraints: [],
 
   get_map: function() {
     var self = this;
@@ -848,6 +1018,17 @@ Pcs.PrimitiveObj = Pcs.ResourceObj.extend({
   instance_status: [],
   operations: [],
   utilization: [],
+  is_unmanaged: function() {
+    var instance_status_list = this.get("instance_status");
+    if (!instance_status_list) {
+      return true;
+    }
+    var is_managed = true;
+    $.each(instance_status_list, function(_, instance_status) {
+      is_managed = is_managed && instance_status.get("managed");
+    });
+    return !is_managed;
+  }.property("instance_status.@each.managed"),
   resource_type: function() {
     var agent = this.get("agentname");
     if (agent) {
@@ -1542,8 +1723,9 @@ Pcs.Cluster = Ember.Object.extend({
     var num = 0;
     $.each(this.get(type), function(key, value) {
       if (value.get("status_val") < get_status_value("ok") &&
-        value.status != "disabled" && value.status != "standby" &&
-        value.status != "maintenance"
+        [
+          "unmanaged", "disabled", "standby", "maintenance"
+        ].indexOf(value.status) == -1
       ) {
         num++;
       }
@@ -2381,6 +2563,7 @@ function constraint_resort(constraints){
       ordering_constraints: {},
       ordering_set_constraints: {},
       colocation_constraints: {},
+      colocation_set_constraints: {},
     };
   }
 
@@ -2391,6 +2574,7 @@ function constraint_resort(constraints){
 
   var colocations = constraint_resort_part(constraints.rsc_colocation, {
     plain: constraint_colocation_create_resource_keyed_map,
+    with_sets: constraint_set_create_resource_keyed_map,
   });
 
   var locations = constraint_resort_part(constraints.rsc_location, {
@@ -2409,5 +2593,6 @@ function constraint_resort(constraints){
     ticket_constraints: tickets.plain,
     ticket_set_constraints: tickets.with_sets,
     colocation_constraints: colocations.plain,
+    colocation_set_constraints: colocations.with_sets,
   };
 }
diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
index 41c481e..360fa1b 100644
--- a/pcsd/public/js/pcsd.js
+++ b/pcsd/public/js/pcsd.js
@@ -96,50 +96,77 @@ function select_menu(menu, item, initial) {
 }
 
 function create_group() {
-  var num_nodes = 0;
-  var node_names = "";
-  $("#resource_list :checked").parent().parent().each(function (index,element) {
-    if (element.getAttribute("nodeID")) {
-      num_nodes++;
-      node_names += element.getAttribute("nodeID") + " "
-    }
-  });
-
-  if (num_nodes == 0) {
+  var resource_list = get_checked_ids_from_nodelist("resource_list");
+  if (resource_list.length == 0) {
     alert("You must select at least one resource to add to a group");
     return;
   }
-
-  $("#resources_to_add_to_group").val(node_names);
+  var not_primitives = resource_list.filter(function(resource_id) {
+    return !Pcs.resourcesContainer.get_resource_by_id(resource_id).get(
+      "is_primitive"
+    );
+  });
+  if (not_primitives.length != 0) {
+    alert("Members of group have to be primitive resources. These resources" +
+      " are not primitives: " + not_primitives.join(", "));
+    return;
+  }
+  var order_el = $("#new_group_resource_list tbody");
+  order_el.empty();
+  order_el.append(resource_list.map(function (item) {
+    return `<tr value="${item}" class="cursor-move"><td>${item}</td></tr>`;
+  }));
+  var order_obj = order_el.sortable();
+  order_el.disableSelection();
   $("#add_group").dialog({
     title: 'Create Group',
+    width: 'auto',
     modal: true,
     resizable: false,
-    buttons: {
-      Cancel: function() {
-        $(this).dialog("close");
+    buttons: [
+      {
+        text: "Cancel",
+        click: function() {
+          $(this).dialog("close");
+        }
       },
-      "Create Group": function() {
-        var data = $('#add_group > form').serialize();
-        var url = get_cluster_remote_url() + "add_group";
-        ajax_wrapper({
-          type: "POST",
-          url: url,
-          data: data,
-          success: function() {
-            Pcs.update();
-            $("#add_group").dialog("close");
-          },
-          error: function (xhr, status, error) {
-            alert(
-              "Error creating group "
-              + ajax_simple_error(xhr, status, error)
-            );
-            $("#add_group").dialog("close");
-          }
-        });
+      {
+        text: "Create Group",
+        id: "add_group_submit_btn",
+        click: function() {
+          var dialog_obj = $(this);
+          var submit_btn_obj = dialog_obj.parent().find(
+            "#add_group_submit_btn"
+          );
+          submit_btn_obj.button("option", "disabled", true);
+
+          ajax_wrapper({
+            type: "POST",
+            url: get_cluster_remote_url() + "add_group",
+            data: {
+              resource_group: $(
+                '#add_group:visible input[name=resource_group]'
+              ).val(),
+              resources: order_obj.sortable(
+                "toArray", {attribute: "value"}
+              ).join(" ")
+            },
+            success: function() {
+              submit_btn_obj.button("option", "disabled", false);
+              Pcs.update();
+              dialog_obj.dialog("close");
+            },
+            error: function (xhr, status, error) {
+              alert(
+                "Error creating group "
+                + ajax_simple_error(xhr, status, error)
+              );
+              submit_btn_obj.button("option", "disabled", false);
+            }
+          });
+        }
       }
-    }
+    ]
   });
 }
 
@@ -1134,8 +1161,8 @@ function hover_out(o) {
 }
 
 function reload_current_resource() {
-  tree_view_onclick(curResource(), true);
-  tree_view_onclick(curStonith(), true);
+  tree_view_onclick(curResource());
+  tree_view_onclick(curStonith());
 }
 
 function load_row(node_row, ac, cur_elem, containing_elem, also_set, initial_load){
@@ -1262,7 +1289,9 @@ function remove_nodes(ids, force) {
 }
 
 function remove_resource(ids, force) {
-  var data = {};
+  var data = {
+    no_error_if_not_exists: true
+  };
   if (force) {
     data["force"] = force;
   }
@@ -1287,12 +1316,27 @@ function remove_resource(ids, force) {
       Pcs.update();
     },
     error: function (xhr, status, error) {
-      error = $.trim(error)
-      var message = "Unable to remove resources (" + error + ")";
+      error = $.trim(error);
+      var message = "";
       if (
-        (xhr.responseText.substring(0,6) == "Error:") || ("Forbidden" == error)
+        status == "timeout" ||
+        error == "timeout" ||
+        xhr.responseText == '{"noresponse":true}'
       ) {
-        message += "\n\n" + xhr.responseText.replace("--force", "'Enforce removal'");
+        message = "Operation takes longer to complete than expected.";
+      } else {
+        message = "Unable to remove resources (" + error + ")";
+        if (
+          (xhr.responseText.substring(0, 6) == "Error:") ||
+          ("Forbidden" == error)
+        ) {
+          message += "\n\n" + xhr.responseText.replace(
+            "--force", "'Enforce removal'"
+          );
+          alert(message);
+          $("#verify_remove_submit_btn").button("option", "disabled", false);
+          return;
+        }
       }
       alert(message);
       $("#dialog_verify_remove_resources.ui-dialog-content").each(
@@ -1916,6 +1960,7 @@ function get_status_value(status) {
     maintenance: 2,
     "partially running": 2,
     disabled: 3,
+    unmanaged: 3,
     unknown: 4,
     ok: 5,
     running: 5,
@@ -1932,7 +1977,8 @@ function status_comparator(a,b) {
   return valA - valB;
 }
 
-function get_status_icon_class(status_val) {
+function get_status_icon_class(status_val, is_unmanaged) {
+  var is_unmanaged = typeof is_unmanaged !== 'undefined' ? is_unmanaged : false;
   switch (status_val) {
     case get_status_value("error"):
       return "error";
@@ -1940,15 +1986,16 @@ function get_status_icon_class(status_val) {
     case get_status_value("warning"):
       return "warning";
     case get_status_value("ok"):
-      return "check";
+      return is_unmanaged ? "warning" : "check";
     default:
       return "x";
   }
 }
 
-function get_status_color(status_val) {
+function get_status_color(status_val, is_unmanaged) {
+  var is_unmanaged = typeof is_unmanaged !== 'undefined' ? is_unmanaged : false;
   if (status_val == get_status_value("ok")) {
-    return "green";
+    return is_unmanaged? "orange" : "green";
   }
   else if (status_val == get_status_value("warning") || status_val == get_status_value("unknown") || status_val == get_status_value('disabled')) {
     return "orange";
@@ -2032,8 +2079,13 @@ function fix_auth_of_cluster() {
   });
 }
 
-function get_tree_view_element_id(element) {
-  return $(element).parents('table.tree-element')[0].id;
+function get_tree_view_resource_id(element) {
+  var suffix = '-treeview-element';
+  var element_id = $(element).parents('table.tree-element')[0].id;
+  if (element_id && element_id.endsWith(suffix)) {
+    return element_id.substr(0, element_id.lastIndexOf(suffix));
+  }
+  return null;
 }
 
 function get_list_view_element_id(element) {
@@ -2046,6 +2098,7 @@ function auto_show_hide_constraints() {
     "ordering_constraints",
     "ordering_set_constraints",
     "colocation_constraints",
+    "colocation_set_constraints",
     "ticket_constraints",
     "ticket_set_constraints",
     "meta_attributes",
@@ -2097,34 +2150,30 @@ function update_instance_attributes(resource_id) {
   }, res_obj.get("stonith"));
 }
 
-function tree_view_onclick(resource_id, auto) {
-  auto = typeof auto !== 'undefined' ? auto : false;
+function tree_view_onclick(resource_id) {
   var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
   if (!resource_obj) {
     console.log("Resource " + resource_id + "not found.");
     return;
   }
   if (resource_obj.get('stonith')) {
-    Pcs.resourcesContainer.set('cur_fence', resource_obj);
-    if (!auto) {
+    if (window.location.hash.startsWith("#/fencedevices")) {
       window.location.hash = "/fencedevices/" + resource_id;
-      update_instance_attributes(resource_id);
     }
+    Pcs.resourcesContainer.set('cur_fence', resource_obj);
   } else {
-    Pcs.resourcesContainer.set('cur_resource', resource_obj);
-
-    if (!auto) {
+    if (window.location.hash.startsWith("#/resources")) {
       window.location.hash = "/resources/" + resource_id;
-      update_instance_attributes(resource_id);
     }
+    Pcs.resourcesContainer.set('cur_resource', resource_obj);
     auto_show_hide_constraints();
   }
-
+  update_instance_attributes(resource_id);
   tree_view_select(resource_id);
 }
 
 function tree_view_select(element_id) {
-  var e = $('#' + element_id);
+  var e = $(`#${element_id}-treeview-element`);
   var view = e.parents('table.tree-view');
   view.find('div.arrow').hide();
   view.find('tr.children').hide();
@@ -2136,15 +2185,6 @@ function tree_view_select(element_id) {
   e.find('tr.children').show();
 }
 
-function list_view_select(element_id) {
-  var e = $('#' + element_id);
-  var view = e.parents('table.list-view');
-  view.find('div.arrow').hide();
-  view.find('tr.list-view-element').removeClass("node_selected");
-  e.addClass('node_selected');
-  e.find('div.arrow').show();
-}
-
 function tree_view_checkbox_onchange(element) {
   var e = $(element);
   var children = $(element).closest(".tree-element").find(".children" +
@@ -2246,24 +2286,24 @@ function resource_ungroup(group_id) {
   });
 }
 
-function resource_change_group(resource_id, group_id) {
+function resource_change_group(resource_id, form) {
   if (resource_id == null) {
     return;
   }
   show_loading_screen();
   var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
   var data = {
-    resource_id: resource_id,
-    group_id: group_id
+    resource_id: resource_id
   };
+  $.each($(form).serializeArray(), function(_, item) {
+    data[item.name] = item.value;
+  });
 
-  if (resource_obj.get('parent')) {
-    if (resource_obj.get('parent').get('id') == group_id) {
-      return;
-    }
-    if (resource_obj.get('parent').get('class_type') == 'group') {
-      data['old_group_id'] = resource_obj.get('parent').get('id');
-    }
+  if (
+    resource_obj.get('parent') &&
+    resource_obj.get('parent').get('class_type') == 'group'
+  ) {
+    data['old_group_id'] = resource_obj.get('parent').get('id');
   }
 
   ajax_wrapper({
@@ -2949,3 +2989,64 @@ function sbd_status_dialog() {
     buttons: buttonsOpts
   });
 }
+
function unmanage_resource(resource_id) {
  // Ask pcsd to put the given resource into unmanaged mode, then refresh
  // the cluster status view. No-op when no resource id is supplied.
  if (!resource_id) {
    return;
  }
  // Brief visual feedback on the link the user clicked.
  fade_in_out("#resource_unmanage_link");
  ajax_wrapper({
    type: 'POST',
    url: get_cluster_remote_url() + "unmanage_resource",
    data: {
      // The backend endpoint expects a JSON-encoded list of resource ids.
      resource_list_json: JSON.stringify([resource_id]),
    },
    timeout: pcs_timeout,
    complete: function() {
      // Refresh the status whether the request succeeded or failed.
      Pcs.update();
    },
    error: function (xhr, status, error) {
      alert(
        `Unable to unmanage '${resource_id}': ` +
        ajax_simple_error(xhr, status, error)
      );
    },
  });
}
+
function manage_resource(resource_id) {
  // Ask pcsd to put the given resource back under cluster management,
  // then refresh the status view. No-op when no resource id is supplied.
  if (!resource_id) {
    return;
  }
  // Brief visual feedback on the link the user clicked.
  fade_in_out("#resource_manage_link");
  var request_data = {
    // The backend endpoint expects a JSON-encoded list of resource ids.
    resource_list_json: JSON.stringify([resource_id]),
  };
  ajax_wrapper({
    type: 'POST',
    url: get_cluster_remote_url() + "manage_resource",
    data: request_data,
    timeout: pcs_timeout,
    complete: function() {
      // Refresh the status whether the request succeeded or failed.
      Pcs.update();
    },
    error: function (xhr, status, error) {
      alert(
        `Unable to manage '${resource_id}': ` +
        ajax_simple_error(xhr, status, error)
      );
    }
  });
}
+
function show_add_resource_dialog() {
  // Open the "Add Resource" dialog, resetting the group selector first so
  // a group chosen in a previous use of the form does not leak into it.
  var new_resource_group_selector_id = $(
    "#new_resource_agent .group-selector"
  ).attr("id");
  // NOTE(review): looks up the Ember view by its DOM id to clear the bound
  // value; assumes the selector component exposes "group_select_value" —
  // confirm against the group-selector component definition.
  Ember.View.views[new_resource_group_selector_id].set(
    "group_select_value", null
  );
  $('#new_resource_agent').dialog({
    title: 'Add Resource',
    modal:true, width: 'auto'
  });
}
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index b1e00fa..97e63f1 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -83,6 +83,10 @@ def remote(params, request, auth_user)
       :qdevice_client_disable => method(:qdevice_client_disable),
       :qdevice_client_start => method(:qdevice_client_start),
       :qdevice_client_stop => method(:qdevice_client_stop),
+      :booth_set_config => method(:booth_set_config),
+      :booth_save_files => method(:booth_save_files),
+      :booth_get_config => method(:booth_get_config),
+
   }
   remote_cmd_with_pacemaker = {
       :pacemaker_node_status => method(:remote_pacemaker_node_status),
@@ -116,7 +120,9 @@ def remote(params, request, auth_user)
       :set_resource_utilization => method(:set_resource_utilization),
       :set_node_utilization => method(:set_node_utilization),
       :get_resource_agent_metadata => method(:get_resource_agent_metadata),
-      :get_fence_agent_metadata => method(:get_fence_agent_metadata)
+      :get_fence_agent_metadata => method(:get_fence_agent_metadata),
+      :manage_resource => method(:manage_resource),
+      :unmanage_resource => method(:unmanage_resource),
   }
 
   command = params[:command].to_sym
@@ -328,9 +334,8 @@ end
 def node_standby(params, request, auth_user)
   if params[:name]
     code, response = send_request_with_token(
-      auth_user, params[:name], 'node_standby', true, {"node"=>params[:name]}
+      auth_user, params[:name], 'node_standby', true
     )
-    # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd
   else
     if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
       return 403, 'Permission denied'
@@ -344,9 +349,8 @@ end
 def node_unstandby(params, request, auth_user)
   if params[:name]
     code, response = send_request_with_token(
-      auth_user, params[:name], 'node_unstandby', true, {"node"=>params[:name]}
+      auth_user, params[:name], 'node_unstandby', true
     )
-    # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd
   else
     if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
       return 403, 'Permission denied'
@@ -769,9 +773,19 @@ def get_sw_versions(params, request, auth_user)
 end
 
 def remote_node_available(params, request, auth_user)
-  if (not ISRHEL6 and File.exist?(Cfgsync::CorosyncConf.file_path)) or (ISRHEL6 and File.exist?(Cfgsync::ClusterConf.file_path)) or File.exist?("/var/lib/pacemaker/cib/cib.xml")
+  if (
+    (not ISRHEL6 and File.exist?(Cfgsync::CorosyncConf.file_path)) or
+    (ISRHEL6 and File.exist?(Cfgsync::ClusterConf.file_path)) or
+    File.exist?("/var/lib/pacemaker/cib/cib.xml")
+  )
     return JSON.generate({:node_available => false})
   end
+  if pacemaker_remote_running?()
+    return JSON.generate({
+      :node_available => false,
+      :pacemaker_remote => true,
+    })
+  end
   return JSON.generate({:node_available => true})
 end
 
@@ -827,8 +841,15 @@ def remote_remove_nodes(params, request, auth_user)
   stdout, stderr, retval = run_cmd(
     auth_user, PCS, "cluster", "stop", *stop_params
   )
-  if retval != 0
-    return [400, stderr.join]
+  if retval != 0 and not params['force']
+    # If forced, keep going even if unable to stop all nodes (they may be dead).
+    # Add info this error is forceable if pcs did not do it (e.g. when unable
+    # to connect to some nodes).
+    message = stderr.join
+    if not message.include?(', use --force to override')
+      message += ', use --force to override'
+    end
+    return [400, message]
   end
 
   node_list.each {|node|
@@ -1038,6 +1059,8 @@ def node_status(params, request, auth_user)
     :cman => node.cman,
     :corosync_enabled => node.corosync_enabled,
     :pacemaker_enabled => node.pacemaker_enabled,
+    :pacemaker_remote => node.services[:pacemaker_remote][:running],
+    :pacemaker_remote_enabled => node.services[:pacemaker_remote][:enabled],
     :pcsd_enabled => node.pcsd_enabled,
     :corosync_online => status[:corosync_online],
     :corosync_offline => status[:corosync_offline],
@@ -1396,21 +1419,23 @@ def update_resource (params, request, auth_user)
 
   param_line = getParamList(params)
   if not params[:resource_id]
-    out, stderr, retval = run_cmd(
-      auth_user,
-      PCS, "resource", "create", params[:name], params[:resource_type],
-      *param_line
-    )
-    if retval != 0
-      return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
-    end
+    cmd = [PCS, "resource", "create", params[:name], params[:resource_type]]
+    cmd += param_line
     if params[:resource_group] and params[:resource_group] != ""
-      run_cmd(
-        auth_user,
-        PCS, "resource","group", "add", params[:resource_group], params[:name]
+      cmd += ['--group', params[:resource_group]]
+      if (
+        ['before', 'after'].include?(params[:in_group_position]) and
+        params[:in_group_reference_resource_id]
       )
+        cmd << "--#{params[:in_group_position]}"
+        cmd << params[:in_group_reference_resource_id]
+      end
       resource_group = params[:resource_group]
     end
+    out, stderr, retval = run_cmd(auth_user, *cmd)
+    if retval != 0
+      return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
+    end
 
     if params[:resource_clone] and params[:resource_clone] != ""
       name = resource_group ? resource_group : params[:name]
@@ -1442,10 +1467,18 @@ def update_resource (params, request, auth_user)
         )
       end
     else
-      run_cmd(
-        auth_user, PCS, "resource", "group", "add", params[:resource_group],
+      cmd = [
+        PCS, "resource", "group", "add", params[:resource_group],
         params[:resource_id]
+      ]
+      if (
+        ['before', 'after'].include?(params[:in_group_position]) and
+        params[:in_group_reference_resource_id]
       )
+        cmd << "--#{params[:in_group_position]}"
+        cmd << params[:in_group_reference_resource_id]
+      end
+      run_cmd(auth_user, *cmd)
     end
   end
 
@@ -1546,10 +1579,10 @@ def remove_resource(params, request, auth_user)
       end
       cmd = [PCS, '-f', tmp_file.path, 'resource', 'disable']
       resource_list.each { |resource|
-        _, err, retval = run_cmd(user, *cmd, resource)
+        out, err, retval = run_cmd(user, *(cmd + [resource]))
         if retval != 0
           unless (
-            err.join('').index('unable to find a resource') != -1 and
+            (out + err).join('').include?('unable to find a resource') and
             no_error_if_not_exists
           )
             errors += "Unable to stop resource '#{resource}': #{err.join('')}"
@@ -1584,7 +1617,10 @@ def remove_resource(params, request, auth_user)
     end
     out, err, retval = run_cmd(auth_user, *cmd)
     if retval != 0
-      unless out.index(' does not exist.') != -1 and no_error_if_not_exists
+      unless (
+        (out + err).join('').include?(' does not exist.') and
+        no_error_if_not_exists
+      )
         errors += err.join(' ').strip + "\n"
       end
     end
@@ -1821,6 +1857,11 @@ def add_constraint_set_remote(params, request, auth_user)
       auth_user,
       params["resources"].values, params["force"], !params['disable_autocorrect']
     )
+  when "col"
+    retval, error = add_colocation_set_constraint(
+      auth_user,
+      params["resources"].values, params["force"], !params['disable_autocorrect']
+    )
   when "ticket"
     unless params["options"]["ticket"]
       return [400, "Error adding constraint ticket: option ticket missing"]
@@ -2074,10 +2115,17 @@ def resource_change_group(params, request, auth_user)
     end
     return 200
   end
-  _, stderr, retval = run_cmd(
-    auth_user,
+  cmd = [
     PCS, 'resource', 'group', 'add', params[:group_id], params[:resource_id]
+  ]
+  if (
+  ['before', 'after'].include?(params[:in_group_position]) and
+    params[:in_group_reference_resource_id]
   )
+    cmd << "--#{params[:in_group_position]}"
+    cmd << params[:in_group_reference_resource_id]
+  end
+  _, stderr, retval = run_cmd(auth_user, *cmd)
   if retval != 0
     return [400, "Unable to add resource '#{params[:resource_id]}' to " +
       "group '#{params[:group_id]}': #{stderr.join('')}"
@@ -2190,7 +2238,7 @@ def set_node_utilization(params, reqest, auth_user)
 
   if retval != 0
     return [400, "Unable to set utilization '#{name}=#{value}' for node " +
-      "'#{res_id}': #{stderr.join('')}"
+      "'#{node}': #{stderr.join('')}"
     ]
   end
   return 200
@@ -2589,3 +2637,185 @@ def qdevice_client_start(param, request, auth_user)
     return [400, msg]
   end
 end
+
# Puts the listed resources back under cluster management by running
# `pcs resource manage <ids...>`.
#
# Expects params[:resource_list_json] to be a JSON array of resource ids.
# Returns [status, body]; requires WRITE permission on the local cluster.
def manage_resource(param, request, auth_user)
  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
    return 403, 'Permission denied'
  end
  unless param[:resource_list_json]
    return [400, "Required parameter 'resource_list_json' is missing."]
  end
  begin
    resource_list = JSON.parse(param[:resource_list_json])
  rescue JSON::ParserError
    return [400, 'Invalid input data format']
  end
  # Valid JSON is not enough: a scalar or object would crash on the splat
  # below, so reject anything that is not a list of ids.
  unless resource_list.is_a?(Array)
    return [400, 'Invalid input data format']
  end
  _, err, retval = run_cmd(
    auth_user, PCS, 'resource', 'manage', *resource_list
  )
  if retval != 0
    return [400, err.join('')]
  end
  return [200, '']
end
+
# Takes the listed resources out of cluster management by running
# `pcs resource unmanage <ids...>`.
#
# Expects params[:resource_list_json] to be a JSON array of resource ids.
# Returns [status, body]; requires WRITE permission on the local cluster.
def unmanage_resource(param, request, auth_user)
  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
    return 403, 'Permission denied'
  end
  unless param[:resource_list_json]
    return [400, "Required parameter 'resource_list_json' is missing."]
  end
  begin
    resource_list = JSON.parse(param[:resource_list_json])
  rescue JSON::ParserError
    return [400, 'Invalid input data format']
  end
  # Valid JSON is not enough: a scalar or object would crash on the splat
  # below, so reject anything that is not a list of ids.
  unless resource_list.is_a?(Array)
    return [400, 'Invalid input data format']
  end
  _, err, retval = run_cmd(
    auth_user, PCS, 'resource', 'unmanage', *resource_list
  )
  if retval != 0
    return [400, err.join('')]
  end
  return [200, '']
end
+
# Saves a booth configuration (and optionally its authfile) pushed from
# another node.
#
# Expects params[:data_json] to be a JSON object with :config
# ({:name, :data}, mandatory) and :authfile ({:name, :data}, optional).
# Returns [status, message]; requires WRITE permission on the local cluster.
def booth_set_config(params, request, auth_user)
  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
    return 403, 'Permission denied'
  end
  begin
    unless params[:data_json]
      return [400, "Missing required parameter 'data_json'"]
    end
    data = JSON.parse(params[:data_json], {:symbolize_names => true})
  rescue JSON::ParserError
    return [400, 'Invalid input data format']
  end
  config = data[:config]
  authfile = data[:authfile]
  # The config section must be present and complete.
  return [400, 'Invalid input data format'] unless (
    config and config[:name] and config[:data]
  )
  # The authfile section is optional, but if present it must be complete.
  return [400, 'Invalid input data format'] if (
    authfile and (not authfile[:name] or not authfile[:data])
  )
  begin
    write_booth_config(config[:name], config[:data])
    if authfile
      write_booth_authfile(authfile[:name], authfile[:data])
    end
  rescue InvalidFileNameException => e
    # Raised by the write helpers when a file name tries to escape the
    # booth config directory.
    return [400, "Invalid format of config/key file name '#{e.message}'"]
  rescue => e
    msg = "Unable to save booth configuration: #{e.message}"
    $logger.error(msg)
    return [400, msg]
  end
  msg = 'Booth configuration saved.'
  $logger.info(msg)
  return [200, msg]
end
+
# Stores a set of booth config/authkey files pushed from another node.
#
# Expects params[:data_json] to be a JSON array of file descriptors
# ({:name, :data, optional :is_authfile}). Files whose on-disk content
# differs are skipped unless the rewrite_existing flag is set. Returns
# [200, json] reporting :existing (conflicts), :saved and :failed files.
def booth_save_files(params, request, auth_user)
  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
    return 403, 'Permission denied'
  end
  begin
    file_list = JSON.parse(params[:data_json], {:symbolize_names => true})
    file_list.each { |file|
      unless file[:name] and file[:data]
        return [400, 'Invalid input data format']
      end
      # File names must stay inside BOOTH_CONFIG_DIR.
      if file[:name].include?('/')
        return [400, "Invalid file name format '#{file[:name]}'"]
      end
    }
  rescue JSON::ParserError, NoMethodError
    return [400, 'Invalid input data format']
  end
  rewrite_existing = (
    params.include?('rewrite_existing') || params.include?(:rewrite_existing)
  )

  # Pass 1: find files that already exist with different content.
  conflict_files = []
  file_list.each { |file|
    next unless File.file?(File.join(BOOTH_CONFIG_DIR, file[:name]))
    if file[:is_authfile]
      on_disk = read_booth_authfile(file[:name])
    else
      on_disk = read_booth_config(file[:name])
    end
    conflict_files << file[:name] if on_disk != file[:data]
  }

  # Pass 2: write everything that is new, unchanged, or force-overwritten.
  write_failed = {}
  saved_files = []
  file_list.each { |file|
    if conflict_files.include?(file[:name]) and not rewrite_existing
      next
    end
    begin
      if file[:is_authfile]
        write_booth_authfile(file[:name], file[:data])
      else
        write_booth_config(file[:name], file[:data])
      end
      saved_files << file[:name]
    rescue => e
      msg = "Unable to save file (#{file[:name]}): #{e.message}"
      $logger.error(msg)
      write_failed[file[:name]] = e
    end
  }
  return [200, JSON.generate({
    :existing => conflict_files,
    :saved => saved_files,
    :failed => write_failed
  })]
end
+
# Returns the content of a booth config file plus the authfile it
# references, serialized as JSON.
#
# Optional params[:name] selects "<name>.conf"; the default is
# 'booth.conf'. Requires READ permission on the local cluster.
def booth_get_config(params, request, auth_user)
  unless allowed_for_local_cluster(auth_user, Permissions::READ)
    return 403, 'Permission denied'
  end
  name = params[:name]
  if name
    config_file_name = "#{name}.conf"
  else
    config_file_name = 'booth.conf'
  end
  # Reject path traversal attempts in the requested name.
  if config_file_name.include?('/')
    return [400, 'Invalid name of booth configuration']
  end
  begin
    config_data = read_booth_config(config_file_name)
    unless config_data
      return [400, "Config doesn't exist"]
    end
    authfile_name = nil
    authfile_data = nil
    authfile_path = get_authfile_from_booth_config(config_data)
    if authfile_path
      # Only serve the authfile when it lives in the expected directory;
      # anything else could leak arbitrary files.
      if File.dirname(authfile_path) != BOOTH_CONFIG_DIR
        return [
          400, "Authfile of specified config is not in '#{BOOTH_CONFIG_DIR}'"
        ]
      end
      authfile_name = File.basename(authfile_path)
      authfile_data = read_booth_authfile(authfile_name)
    end
    return [200, JSON.generate({
      :config => {
        :name => config_file_name,
        :data => config_data
      },
      # :authfile members stay nil when the config references no authfile.
      :authfile => {
        :name => authfile_name,
        :data => authfile_data
      }
    })]
  rescue => e
    return [400, "Unable to read booth config/key file: #{e.message}"]
  end
end
diff --git a/pcsd/settings.rb b/pcsd/settings.rb
index 51f00ac..e702585 100644
--- a/pcsd/settings.rb
+++ b/pcsd/settings.rb
@@ -20,6 +20,7 @@ PACEMAKERD = "/usr/sbin/pacemakerd"
 CIBADMIN = "/usr/sbin/cibadmin"
 SBD_CONFIG = '/etc/sysconfig/sbd'
 CIB_PATH='/var/lib/pacemaker/cib/cib.xml'
+BOOTH_CONFIG_DIR='/etc/booth'
 
 COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb"
 COROSYNC_QDEVICE_NET_SERVER_CA_FILE = (
diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian
index aae1b11..27202e8 100644
--- a/pcsd/settings.rb.debian
+++ b/pcsd/settings.rb.debian
@@ -18,7 +18,7 @@ COROSYNC_BINARIES = "/usr/sbin/"
 CMAN_TOOL = "/usr/sbin/cman_tool"
 PACEMAKERD = "/usr/sbin/pacemakerd"
 CIBADMIN = "/usr/sbin/cibadmin"
-SBD_CONFIG = "/etc/sysconfig/sbd"
+SBD_CONFIG = "/etc/default/sbd"
 CIB_PATH = "/var/lib/pacemaker/cib/cib.xml"
 
 COROSYNC_QDEVICE_NET_SERVER_CERTS_DIR = "/etc/corosync/qnetd/nssdb"
diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb
index f56c947..7bbff46 100644
--- a/pcsd/ssl.rb
+++ b/pcsd/ssl.rb
@@ -3,10 +3,15 @@ require 'webrick'
 require 'webrick/https'
 require 'openssl'
 require 'rack'
+require 'socket'
 
 require 'bootstrap.rb'
 require 'pcs.rb'
 
+unless defined? OpenSSL::SSL::OP_NO_TLSv1_1
+  OpenSSL::SSL::OP_NO_TLSv1_1 = 268435456
+end
+
 server_name = WEBrick::Utils::getservername
 $logger = configure_logger('/var/log/pcsd/pcsd.log')
 
@@ -66,11 +71,28 @@ def run_server(server, webrick_options, secondary_addrs)
 
   $logger.info("Listening on #{primary_addr} port #{port}")
   server.run(Sinatra::Application, webrick_options) { |server_instance|
+    # configure ssl options
     server_instance.ssl_context.ciphers = ciphers
+    # set listening addresses
     secondary_addrs.each { |addr|
       $logger.info("Adding listener on #{addr} port #{port}")
       server_instance.listen(addr, port)
     }
+    # notify systemd we are running
+    if ISSYSTEMCTL
+      socket_name = ENV['NOTIFY_SOCKET']
+      if socket_name
+        if socket_name.start_with?('@')
+          # abstract namespace socket
+          socket_name[0] = "\0"
+        end
+        $logger.info("Notifying systemd we are running (socket #{socket_name})")
+        sd_socket = Socket.new(Socket::AF_UNIX, Socket::SOCK_DGRAM)
+        sd_socket.connect(Socket.pack_sockaddr_un(socket_name))
+        sd_socket.send('READY=1', 0)
+        sd_socket.close()
+      end
+    end
   }
 end
 
@@ -95,7 +117,8 @@ else
 end
 
 default_bind = true
-primary_addr = '::'
+# see https://github.com/ClusterLabs/pcs/issues/51
+primary_addr = if RUBY_VERSION >= '2.1' then '*' else '::' end
 secondary_addrs = []
 if ENV['PCSD_BIND_ADDR']
   user_addrs = ENV['PCSD_BIND_ADDR'].split(',').collect { |x| x.strip() }
diff --git a/pcsd/views/_dialogs.erb b/pcsd/views/_dialogs.erb
index 46e7fdb..d18ac71 100644
--- a/pcsd/views/_dialogs.erb
+++ b/pcsd/views/_dialogs.erb
@@ -215,3 +215,24 @@
   </table>
   {{/if}}
 </div>
+
+<div id="add_group" style="display: none;">
+  <form method=POST onkeypress="if (event.keyCode == 13) {$(this).parent().parent().find('.ui-dialog-buttonpane button:eq(1)').trigger('click');return false;} " action="/resource_group_add">
+    <table>
+      <tr>
+        <td>Group Name:</td>
+        <td>
+          <input name="resource_group" type="text" />
+        </td>
+      </tr>
+      <tr>
+        <td style="vertical-align: top;">Change order of resources:</td>
+        <td>
+          <table id="new_group_resource_list" class="sortable-table">
+            <tbody></tbody>
+          </table>
+        </td>
+      </tr>
+    </table>
+  </form>
+</div>
diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb
index a337160..86e5567 100644
--- a/pcsd/views/_resource.erb
+++ b/pcsd/views/_resource.erb
@@ -14,7 +14,7 @@
       Remove</a>    </div>
   <div class="plus sprites"></div><div class="link"> 
     <% if @myView == "resource" %>
-      <a href="#" onclick="$('#new_resource_agent').dialog({title: 'Add Resource', modal:true, width: 'auto'});return false;">
+      <a href="#" onclick="show_add_resource_dialog();return false;">
     <% else %>
       <a href="#" onclick="$('#new_stonith_agent').dialog({title: 'Add Fence Device', modal:true, width: 'auto'});return false;">
     <% end %>
@@ -116,10 +116,4 @@
           table_id_suffix="_new"
       }}
     </div>
-    <div id="add_group" style="display: none;">
-      <form method=POST onkeypress="if (event.keyCode == 13) {$(this).parent().parent().find('.ui-dialog-buttonpane button:eq(1)').trigger('click');return false;} " action="/resource_group_add">
-	<p style="font-size:12px;">Group Name:</p><input name="resource_group" type=text>
-	<input id="resources_to_add_to_group"  type=hidden name="resources" value="">
-      </form>
-    </div>
     <% end %>
diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
index 5461515..bd7c234 100644
--- a/pcsd/views/main.erb
+++ b/pcsd/views/main.erb
@@ -63,26 +63,6 @@
 </head>
 <html>
   <body>
-  <script type="text/x-handlebars" data-template-name="components/list-view">
-  <div style="width: 300px;" id="node_list" {{bind-attr class=element-class}}>
-    <table cellpadding="0" cellspacing="0" class="list-view" style="width: 100%; border: none;">
-      <tr>
-        <th style="width:27px;"><input type="checkbox" onchange="checkBoxToggle(this,false)"></th><th style="width:47px;"></th><th>{{table-title}}</th><th style="width:18px;"></th>
-      </tr>
-      {{#each element in elements}}
-      <tr class="list-view-element" onmouseover="$(this).addClass('mouse_on_row');" onmouseout="$(this).removeClass('mouse_on_row');" onclick="list_view_select(get_list_view_element_id(this));" {{bind-attr id=element.name}}>
-        <td class="node_list_check">{{view Ember.Checkbox checkedBinding="checked" class="node_list_check"}}</td>
-        <td class="node_list_sprite">{{#if default-icon}}{{{default-icon}}}{{else}}{{#if element.status_icon}}{{{element.status_icon}}}{{else}}<div class="check sprites"></div>{{/if}}{{/if}}</td>
-        <td class="node_name" nowrap {{bind-attr style=element.style}}>{{element.name}}</td>
-        <td>
-          <div style="display: none;" class="arrow sprites"></div>
-        </td>
-      </tr>
-      {{/each}}
-      </table>
-    </div>
-  </script>
-
   <script type="text/x-handlebars" data-template-name="components/resource-tree-view">
   <div style="width: 450px;">
     <table cellpadding="0" cellspacing="0" class="tree-view" style="width: 100%; border: none;">
@@ -99,8 +79,8 @@
   </script>
 
   <script type="text/x-handlebars" data-template-name="components/resource-tree-element">
-    <table class="tree-element"  cellpadding="0" cellspacing="0" style="width: 100%; border: none;" {{bind-attr id=node._id}}>
-    <tr class="tree-element-name" onclick="tree_view_onclick(get_tree_view_element_id(this));" onmouseover="$(this).addClass('mouse_on_row');" onmouseout="$(this).removeClass('mouse_on_row');" {{bind-attr nodeID=node.id}}>
+    <table class="tree-element"  cellpadding="0" cellspacing="0" style="width: 100%; border: none;" {{bind-attr id=node.treeview_element_id}}>
+    <tr class="tree-element-name" onclick="tree_view_onclick(get_tree_view_resource_id(this));" onmouseover="$(this).addClass('mouse_on_row');" onmouseout="$(this).removeClass('mouse_on_row');" {{bind-attr nodeID=node.id}}>
         <td style="width:20px;" class="node_list_check">
           <input type="checkbox" onchange="tree_view_checkbox_onchange(this)">
         </td>
@@ -160,6 +140,7 @@
       </table>
     </div>
     <div id="node_options_buttons">
+    <div>
     {{#if resource.stonith}}
       <div class="xdark sprites" style="float: left"></div>
       <div id="stonith_delete_link" class="link" onclick="verify_remove_fence_devices(curStonith());">Remove</div>
@@ -174,7 +155,32 @@
       <div id="resource_cleanup_link" class="link" onclick="cleanup_resource();">Cleanup</div>
       <div class="xdark sprites" style="float: left"></div>
       <div id="resource_delete_link" class="link" onclick="verify_remove_resources(curResource());">Remove</div>
+      </div>
+      <div>
+      {{#if Pcs.is_supported_unmanaged_resource}}
+        <div>
+        <div class="checkdark sprites" style="float: left"></div>
+        <div
+          id="resource_manage_link"
+          class="link"
+          onclick="manage_resource(curResource());"
+        >
+          Manage
+        </div>
+        </div>
+        <div>
+        <div class="cancel sprites" style="float: left"></div>
+        <div
+          id="resource_unmanage_link"
+          class="link"
+          onclick="unmanage_resource(curResource());"
+        >
+          Unmanage
+        </div>
+        </div>
+      {{/if}}
     {{/if}}
+    </div>
       <!--
       <div class="move sprites" style="float: left"></div>
       <div id="resource_move_link" class="link">Move</div>
@@ -220,7 +226,6 @@
             <td class="bold" nowrap>Current Location:</td>
             <td id="cur_res_loc" class="reg">{{resource.nodes_running_on_string}}</td>
           </tr>
-          {{#unless old_pcsd}}
           {{#unless resource.parent}}
             <tr>
               <td class="bold" nowrap>Clone:</td>
@@ -237,23 +242,23 @@
             <tr>
               <td class="bold" nowrap>Group:</td>
               <td id="cur_res_loc" class="reg">
-                {{{resource.group_selector}}}
+                {{group-selector resource_id=resource._id}}
               </td>
             </tr>
           {{else}}
             {{#if resource.parent.is_group}}
+            {{#if resource.show_group_selector}}
             <tr>
               <td class="bold" nowrap>Group:</td>
               <td id="cur_res_loc" class="reg">
-                {{{resource.group_selector}}}
+                {{group-selector resource_id=resource._id}}
               </td>
             </tr>
             {{/if}}
-          {{/unless}}
+            {{/if}}
           {{/unless}}
         {{/if}}
         {{/unless}}
-        {{#unless old_pcsd}}
         {{#if resource.is_group}}
         {{#unless resource.parent}}
           <tr>
@@ -268,12 +273,14 @@
               <input type="button" onclick="resource_master(curResource());" value="Create master/slave">
             </td>
           </tr>
-          <tr>
-            <td class="bold" nowrap>Group:</td>
-            <td id="cur_res_loc" class="reg">
-              <input type="button" onclick="resource_ungroup(curResource());" value="Ungroup">
-            </td>
-          </tr>
+          {{#unless old_pcsd}}
+            <tr>
+              <td class="bold" nowrap>Group:</td>
+              <td id="cur_res_loc" class="reg">
+                <input type="button" onclick="resource_ungroup(curResource());" value="Ungroup">
+              </td>
+            </tr>
+          {{/unless}}
         {{/unless}}
         {{/if}}
         {{#if resource.is_multi_instance}}
@@ -284,13 +291,15 @@
             </td>
           </tr>
         {{/if}}
-        {{/unless}}
       </table>
       {{#unless resource.stonith}}
         {{location_constraints-table constraints=resource.location_constraints}}
         {{ordering_constraints-table constraints=resource.ordering_constraints resource_id=resource._id}}
         {{ordering_set_constraints-table constraints=resource.ordering_set_constraints}}
         {{colocation_constraints-table constraints=resource.colocation_constraints}}
+        {{#if Pcs.is_supported_constraint_colocation_set}}
+          {{colocation_set_constraints-table constraints=resource.colocation_set_constraints}}
+        {{/if}}
         {{#if Pcs.is_ticket_constraints_supported}}
           {{ticket_constraints-table constraints=resource.ticket_constraints resource_id=resource._id}}
           {{ticket_set_constraints-table constraints=resource.ticket_set_constraints}}
@@ -696,6 +705,47 @@ Use the 'Add' button to submit the form.">
 	      </table>
   </script>
 
+  <script type="text/x-handlebars" data-template-name="components/colocation_set_constraints-table">
+    <table style="clear:left;float:left;">
+          <tr><td style="display: block;" onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="colocation_set_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Colocation Set Preferences ({{#if constraints.length}}{{constraints.length}}{{else}}0{{/if}})</td></tr>
+          <tr><td>
+            <div id="locationdep">
+              <table class="datatable">
+                <tr>
+                  <th>Preference Name/Set of Resources</th>
+                  <th style="text-align: center;">Remove</th>
+                </tr>
+                {{#each cons in constraints}}
+                <tr>
+                  <td>{{cons.id}}</td>
+                  <td {{bind-attr constraint_id="cons.id"}} style="text-align:center;">
+                    <a onclick="return remove_constraint_action(this);" href="#" class="remove">X</a>
+                  </td>
+                </tr>
+                {{#each set in cons.sets}}
+                <tr>
+                  <td style="padding-left:2em;">Set:{{#each rsc in set.resources}} {{rsc}}{{/each}}</td>
+                  <td></td>
+                </tr>
+                {{/each}}
+                {{else}}
+                <tr><td style="color: gray;">NONE</td><td></td></tr>
+                {{/each}}
+                <tr id="new_res_col_set" title="Enter the resources you want to be in one set into the 'Set' field separated by space.
+Use the 'New Set' button to create more sets.
+Use the 'Add' button to submit the form.">
+                  <td>Set: <input type="text" name="resource_ids[]"></td>
+                  <td style="vertical-align: bottom;">
+                    <button type="button" onclick="new_constraint_set_row('#new_res_col_set');" name="new-row">New Set</button>
+                    <button type="button" onclick="add_constraint_set('#new_res_col_set', 'col', false);" name="add">Add</button>
+                  </td>
+                </tr>
+              </table>
+            </div>
+          </td></tr>
+        </table>
+  </script>
+
   <script type="text/x-handlebars" data-template-name="components/meta_attributes-table">
     <table style="clear:left;float:left">
 		<tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="meta_attributes"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Meta Attributes ({{#if resource.meta_attr.length}}{{resource.meta_attr.length}}{{else}}0{{/if}})</td></tr>
@@ -865,10 +915,9 @@ Use the 'Add' button to submit the form.">
                     </div>
                   </td>
                   <td>
-                    {{value-selector
-                        prompt="None"
-                        content=groups
-                        name="resource_group"
+                    {{group-selector
+                        group_list=Pcs.resourcesContainer.group_list
+                        group_input_name="resource_group"
                     }}
                   </td>
                 </tr>
@@ -1051,6 +1100,46 @@ Use the 'Add' button to submit the form.">
     </td>
   </script>
 
+  <script type="text/x-handlebars" data-template-name="components/group-selector">
+    {{value-selector
+        name=group_input_name
+        content=group_select_content
+        value=group_select_value
+        prompt="None"
+    }}
+    {{#if Pcs.is_supported_moving_resource_in_group}}
+    {{#if group_select_value}}
+    {{#if resource_select_content}}
+      {{value-selector
+          name="in_group_position"
+          content=position_select_content
+          value=position_select_value
+          prompt=""
+      }}
+      {{value-selector
+          name="in_group_reference_resource_id"
+          content=resource_select_content
+          value=resource_select_value
+          prompt=""
+      }}
+    {{/if}}
+    {{/if}}
+    {{/if}}
+    {{#if resource_id}}
+      <br/>
+      <button
+        onclick="
+          resource_change_group(curResource(), $(this).parent().find('select'));
+          return false;
+        "
+      >
+        Update group
+      </button>
+      <button {{action refresh}}>Refresh</button>
+    {{/if}}
+
+  </script>
+
   <script type="text/x-handlebars">
 <div id="wrapper">
 
diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb
index 885b327..39ab41f 100644
--- a/pcsd/views/manage.erb
+++ b/pcsd/views/manage.erb
@@ -113,13 +113,18 @@
               <td>
                 <table class="datatable">
                   <tr>
-                    <th style="width: 150px;">RESOURCE</th>
-                    <th style="width: 100px;">STATUS</th>
+                    <th style="width: 170px;">RESOURCE</th>
+                    <th style="width: 150px;">STATUS</th>
                   </tr>
                   {{#each r in Pcs.clusterController.cur_cluster.resource_list}}
                   <tr {{bind-attr title=r.tooltip}} {{bind-attr class=r.status_class}}>
                     <td><a {{bind-attr href=r.url_link}}>{{r.id}}</a></td>
-                    <td {{bind-attr style=r.status_style}}>{{{r.status_icon}}}{{r.status}}</td>
+                    <td {{bind-attr style=r.status_style}}>
+                      {{{r.status_icon}}}{{r.status}}
+                      {{#if r.is_unmanaged}}
+                        (unmanaged)
+                      {{/if}}
+                    </td>
                   </tr>
                   {{else}}
                   <tr>
@@ -144,8 +149,8 @@
               <td>
                 <table class="datatable">
                   <tr>
-                    <th style="width: 150px;">FENCE-DEVICE</th>
-                    <th style="width: 100px;">STATUS</th>
+                    <th style="width: 170px;">FENCE-DEVICE</th>
+                    <th style="width: 150px;">STATUS</th>
                   </tr>
                   {{#each f in Pcs.clusterController.cur_cluster.fence_list}}
                   <tr {{bind-attr title=f.tooltip}} {{bind-attr class=f.status_class_fence}}>
diff --git a/setup.py b/setup.py
index 0e8a45c..f698f2f 100644
--- a/setup.py
+++ b/setup.py
@@ -16,7 +16,7 @@ class CleanCommand(Command):
 
 setup(
     name='pcs',
-    version='0.9.153',
+    version='0.9.154',
     description='Pacemaker Configuration System',
     author='Chris Feist',
     author_email='cfeist at redhat.com',

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git



More information about the Debian-HA-Commits mailing list