[Debian-ha-commits] [pcs] 01/04: Imported Upstream version 0.9.152

Valentin Vidic vvidic-guest at moszumanska.debian.org
Mon Jun 27 18:25:15 UTC 2016


This is an automated email from the git hooks/post-receive script.

vvidic-guest pushed a commit to branch master
in repository pcs.

commit 7162906c716df1e1297cd46002bbbc3f7757b2ee
Author: Valentin Vidic <Valentin.Vidic at CARNet.hr>
Date:   Mon Jun 27 14:39:30 2016 +0200

    Imported Upstream version 0.9.152
---
 pcs/app.py                                         |  16 +-
 pcs/cli/common/lib_wrapper.py                      |  32 +-
 pcs/cli/common/reports.py                          |   9 +-
 pcs/cli/constraint_ticket/console_report.py        |  21 +-
 .../constraint_ticket/test/test_console_report.py  |  12 +
 pcs/cluster.py                                     | 110 ++-
 pcs/common/report_codes.py                         |  40 +-
 pcs/common/tools.py                                |  14 +
 pcs/constraint.py                                  |   7 +-
 pcs/lib/cib/constraint/ticket.py                   |   2 +-
 pcs/lib/cib/resource.py                            |   8 +-
 pcs/lib/cib/test/test_constraint_ticket.py         |  13 +
 pcs/lib/cib/test/test_resource.py                  |  21 +
 pcs/lib/commands/qdevice.py                        | 152 +++++
 pcs/lib/commands/sbd.py                            | 355 ++++++++++
 pcs/lib/corosync/config_facade.py                  |   3 +-
 pcs/lib/corosync/qdevice_net.py                    |  74 ++
 pcs/lib/external.py                                | 240 ++++++-
 pcs/lib/node.py                                    |  46 +-
 pcs/lib/nodes_task.py                              |  15 +
 pcs/lib/reports.py                                 | 522 ++++++++++++++
 pcs/lib/resource_agent.py                          | 212 +++---
 pcs/lib/sbd.py                                     | 364 ++++++++++
 pcs/lib/tools.py                                   |  49 ++
 pcs/pcs.8                                          | 253 ++++---
 pcs/qdevice.py                                     |  89 +++
 pcs/quorum.py                                      |   3 +
 pcs/resource.py                                    |  36 +-
 pcs/settings_default.py                            |  17 +-
 pcs/stonith.py                                     | 260 +++++--
 pcs/test/suite.py                                  |  55 +-
 pcs/test/test_common_tools.py                      |  65 ++
 pcs/test/test_lib_commands_qdevice.py              | 759 +++++++++++++++++++++
 pcs/test/test_lib_commands_quorum.py               |   8 +-
 pcs/test/test_lib_commands_sbd.py                  | 668 ++++++++++++++++++
 pcs/test/test_lib_corosync_config_facade.py        |  66 +-
 pcs/test/test_lib_corosync_qdevice_net.py          |  91 +++
 pcs/test/test_lib_external.py                      | 488 +++++++++++++
 pcs/test/test_lib_node.py                          |  48 ++
 pcs/test/test_lib_nodes_task.py                    |  10 +
 pcs/test/test_lib_resource_agent.py                | 316 +++------
 pcs/test/test_lib_sbd.py                           | 596 ++++++++++++++++
 pcs/test/test_lib_tools.py                         |  44 ++
 pcs/test/test_quorum.py                            |  28 +-
 pcs/test/test_resource.py                          |   2 +-
 pcs/test/test_stonith.py                           |   4 +-
 pcs/test/tools/assertions.py                       |  27 +
 pcs/usage.py                                       | 328 +++++----
 pcs/utils.py                                       | 104 ++-
 pcsd/bootstrap.rb                                  |   2 +-
 pcsd/cluster_entity.rb                             |  34 +-
 pcsd/pcs.rb                                        | 250 +++++--
 pcsd/pcsd.rb                                       |  16 +-
 pcsd/public/css/style.css                          |  60 +-
 pcsd/public/js/nodes-ember.js                      | 358 +++++++---
 pcsd/public/js/pcsd.js                             | 258 ++++++-
 pcsd/remote.rb                                     | 259 ++++++-
 pcsd/settings.rb                                   |   2 +
 pcsd/settings.rb.debian                            |   2 +
 pcsd/views/_dialogs.erb                            | 160 +++++
 pcsd/views/_resource.erb                           |  28 +-
 pcsd/views/main.erb                                | 242 ++++++-
 pcsd/views/nodes.erb                               |  13 +
 setup.py                                           |  14 +-
 64 files changed, 7485 insertions(+), 915 deletions(-)

diff --git a/pcs/app.py b/pcs/app.py
index d8255f8..3c4865f 100644
--- a/pcs/app.py
+++ b/pcs/app.py
@@ -19,6 +19,7 @@ from pcs import (
     node,
     pcsd,
     prop,
+    qdevice,
     quorum,
     resource,
     settings,
@@ -95,7 +96,7 @@ def main(argv=None):
             "token=", "token_coefficient=", "consensus=", "join=",
             "miss_count_const=", "fail_recv_const=",
             "corosync_conf=", "cluster_conf=",
-            "remote",
+            "remote", "watchdog=",
            #in pcs status - do not display resource status on inactive node
             "hide-inactive",
         ]
@@ -123,10 +124,16 @@ def main(argv=None):
     argv = real_argv
     for o, a in pcs_options:
         if not o in utils.pcs_options:
+            if o == "--watchdog":
+                a = [a]
             utils.pcs_options[o] = a
         else:
             # If any options are a list then they've been entered twice which isn't valid
-            utils.err("%s can only be used once" % o)
+            if o != "--watchdog":
+                utils.err("%s can only be used once" % o)
+            else:
+                utils.pcs_options[o].append(a)
+
         if o == "-h" or o == "--help":
             if len(argv) == 0:
                 usage.main()
@@ -181,6 +188,11 @@ def main(argv=None):
             argv,
             utils.get_modificators()
         ),
+        "qdevice": lambda argv: qdevice.qdevice_cmd(
+            utils.get_library_wrapper(),
+            argv,
+            utils.get_modificators()
+        ),
     }
     if command not in cmd_map:
         usage.main()
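
The hunk above makes "--watchdog" the one option that may be given more than
once: its values accumulate in a list, while any other repeated option still
aborts. A standalone sketch of that logic (option names and values here are
illustrative, not taken from a real invocation):

    pcs_options = {}
    repeatable = ("--watchdog",)

    parsed = [
        ("--watchdog", "/dev/watchdog"),
        ("--corosync_conf", "/tmp/corosync.conf"),
        ("--watchdog", "/dev/watchdog1"),
    ]
    for opt, val in parsed:
        if opt not in pcs_options:
            # repeatable options are stored as lists from the start
            pcs_options[opt] = [val] if opt in repeatable else val
        elif opt in repeatable:
            pcs_options[opt].append(val)
        else:
            raise SystemExit("%s can only be used once" % opt)

    print(pcs_options["--watchdog"])  # ['/dev/watchdog', '/dev/watchdog1']
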
diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
index 269a9ac..909b435 100644
--- a/pcs/cli/common/lib_wrapper.py
+++ b/pcs/cli/common/lib_wrapper.py
@@ -15,7 +15,11 @@ from pcs.cli.common import middleware
 from pcs.lib.commands.constraint import colocation as constraint_colocation
 from pcs.lib.commands.constraint import order as constraint_order
 from pcs.lib.commands.constraint import ticket as constraint_ticket
-from pcs.lib.commands import quorum
+from pcs.lib.commands import (
+    quorum,
+    qdevice,
+    sbd,
+)
 from pcs.cli.common.reports import (
     LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
 )
@@ -110,6 +114,32 @@ def load_module(env, middleware_factory, name):
                 "update_device": quorum.update_device,
             }
         )
+    if name == "qdevice":
+        return bind_all(
+            env,
+            middleware.build(),
+            {
+                "setup": qdevice.qdevice_setup,
+                "destroy": qdevice.qdevice_destroy,
+                "start": qdevice.qdevice_start,
+                "stop": qdevice.qdevice_stop,
+                "kill": qdevice.qdevice_kill,
+                "enable": qdevice.qdevice_enable,
+                "disable": qdevice.qdevice_disable,
+            }
+        )
+    if name == "sbd":
+        return bind_all(
+            env,
+            middleware.build(),
+            {
+                "enable_sbd": sbd.enable_sbd,
+                "disable_sbd": sbd.disable_sbd,
+                "get_cluster_sbd_status": sbd.get_cluster_sbd_status,
+                "get_cluster_sbd_config": sbd.get_cluster_sbd_config,
+                "get_local_sbd_config": sbd.get_local_sbd_config,
+            }
+        )
 
     raise Exception("No library part '{0}'".format(name))
 
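
For context, a minimal sketch of the idea behind bind_all as used above,
assuming it essentially binds the environment into each command from the
mapping (the middleware plumbing is omitted; all names below are stand-ins,
not the real pcs API):

    from functools import partial

    class LibraryModule(object):
        def __init__(self, env, mapping):
            # expose each mapping entry as a method with env pre-applied
            for name, func in mapping.items():
                setattr(self, name, partial(func, env))

    def qdevice_setup(env, model):
        return "setup(env={0}, model={1})".format(env, model)

    qdevice = LibraryModule("lib-env", {"setup": qdevice_setup})
    print(qdevice.setup("net"))  # setup(env=lib-env, model=net)
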
diff --git a/pcs/cli/common/reports.py b/pcs/cli/common/reports.py
index 367f9aa..c97cf6f 100644
--- a/pcs/cli/common/reports.py
+++ b/pcs/cli/common/reports.py
@@ -49,7 +49,7 @@ def _build_report_message(report_item, force_text=""):
 
     return get_template(report_item).format(force=force_text)
 
-def process_library_reports(report_item_list, is_forced=False):
+def process_library_reports(report_item_list):
     """
     report_item_list list of ReportItem
     """
@@ -63,13 +63,6 @@ def process_library_reports(report_item_list, is_forced=False):
             print(report_item.message)
             continue
 
-        if report_item.forceable and is_forced:
-            # Let the user know what may be wrong even when --force is used,
-            # as it may be used for override early errors hiding later
-            # errors otherwise.
-            print("Warning: " + report_item.message)
-            continue
-
         sys.stderr.write('Error: {0}\n'.format(_build_report_message(
             report_item,
             _prepare_force_text(report_item)
diff --git a/pcs/cli/constraint_ticket/console_report.py b/pcs/cli/constraint_ticket/console_report.py
index 657b867..54343d7 100644
--- a/pcs/cli/constraint_ticket/console_report.py
+++ b/pcs/cli/constraint_ticket/console_report.py
@@ -13,14 +13,13 @@ def constraint_plain(constraint_info, with_id=False):
     bool with_id whether to show the id with options_dict
     """
     options = constraint_info["options"]
-    return " ".join(
-        [options.get("rsc-role", ""), options.get("rsc", "")]
-        +
-        prepare_options(
-            dict(
-                (name, value) for name, value in options.items()
-                if name not in ["rsc-role", "rsc"]
-            ),
-            with_id
-        )
-    )
+    role = options.get("rsc-role", "")
+    role_prefix = "{0} ".format(role) if role else ""
+
+    return role_prefix + " ".join([options.get("rsc", "")] + prepare_options(
+        dict(
+            (name, value) for name, value in options.items()
+            if name not in ["rsc-role", "rsc"]
+        ),
+        with_id
+    ))
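
A quick before/after sketch of what this formatting fix changes: when
"rsc-role" is absent, the old join produced a leading space in the rendered
constraint, which the new role_prefix avoids (the prepare_options part is
omitted here):

    options = {"rsc": "resourceA"}

    old = " ".join([options.get("rsc-role", ""), options.get("rsc", "")])
    print(repr(old))  # ' resourceA' - note the leading space

    role = options.get("rsc-role", "")
    role_prefix = "{0} ".format(role) if role else ""
    new = role_prefix + " ".join([options.get("rsc", "")])
    print(repr(new))  # 'resourceA'
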
diff --git a/pcs/cli/constraint_ticket/test/test_console_report.py b/pcs/cli/constraint_ticket/test/test_console_report.py
index a8e570f..b352287 100644
--- a/pcs/cli/constraint_ticket/test/test_console_report.py
+++ b/pcs/cli/constraint_ticket/test/test_console_report.py
@@ -21,3 +21,15 @@ class ConstraintPlainTest(TestCase):
                 with_id=True
             )
         )
+
+    def test_prepare_report_without_role(self):
+        self.assertEqual(
+            "resourceA (id:some_id)",
+            console_report.constraint_plain(
+                {"options": {
+                    "rsc": "resourceA",
+                    "id": "some_id"
+                }},
+                with_id=True
+            )
+        )
diff --git a/pcs/cluster.py b/pcs/cluster.py
index 872b922..002b5c5 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -38,9 +38,20 @@ from pcs.utils import parallel_for_nodes
 from pcs.common import report_codes
 from pcs.lib import (
     pacemaker as lib_pacemaker,
+    sbd as lib_sbd,
     reports as lib_reports,
 )
-from pcs.lib.errors import ReportItemSeverity, LibraryError
+from pcs.lib.tools import environment_file_to_dict
+from pcs.lib.external import (
+    disable_service,
+    NodeCommunicationException,
+    node_communicator_exception_to_report_item,
+)
+from pcs.lib.node import NodeAddresses
+from pcs.lib.errors import (
+    LibraryError,
+    ReportItemSeverity,
+)
 from pcs.lib.corosync import config_parser as corosync_conf_utils
 
 def cluster_cmd(argv):
@@ -1310,11 +1321,58 @@ def cluster_node(argv):
                 "cluster is not configured for RRP, "
                 "you must not specify ring 1 address for the node"
             )
+        utils.check_qdevice_algorithm_and_running_cluster(
+            utils.getCorosyncConf(), add=True
+        )
         corosync_conf = None
         (canAdd, error) =  utils.canAddNodeToCluster(node0)
         if not canAdd:
             utils.err("Unable to add '%s' to cluster: %s" % (node0, error))
 
+        try:
+            node_addr = NodeAddresses(node0, node1)
+            lib_env = utils.get_lib_env()
+            report_processor = lib_env.report_processor
+            node_communicator = lib_env.node_communicator()
+            if lib_sbd.is_sbd_enabled(utils.cmd_runner()):
+                if "--watchdog" not in utils.pcs_options:
+                    watchdog = settings.sbd_watchdog_default
+                    print("Warning: using default watchdog '{0}'".format(
+                        watchdog
+                    ))
+                else:
+                    watchdog = utils.pcs_options["--watchdog"][0]
+
+                report_processor.process(lib_reports.sbd_check_started())
+                lib_sbd.check_sbd_on_node(
+                    report_processor, node_communicator, node_addr, watchdog
+                )
+                sbd_cfg = environment_file_to_dict(
+                    lib_sbd.get_local_sbd_config()
+                )
+                sbd_cfg["SBD_WATCHDOG_DEV"] = watchdog
+                report_processor.process(
+                    lib_reports.sbd_config_distribution_started()
+                )
+                lib_sbd.set_sbd_config_on_node(
+                    report_processor, node_communicator, node_addr, sbd_cfg
+                )
+                report_processor.process(lib_reports.sbd_enabling_started())
+                lib_sbd.enable_sbd_service_on_node(
+                    report_processor, node_communicator, node_addr
+                )
+            else:
+                report_processor.process(lib_reports.sbd_disabling_started())
+                lib_sbd.disable_sbd_service_on_node(
+                    report_processor, node_communicator, node_addr
+                )
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
+        except NodeCommunicationException as e:
+            utils.process_library_reports(
+                [node_communicator_exception_to_report_item(e)]
+            )
+
         for my_node in utils.getNodesFromCorosyncConf():
             retval, output = utils.addLocalNode(my_node, node0, node1)
             if retval != 0:
@@ -1375,6 +1433,9 @@ def cluster_node(argv):
             utils.err(
                 "node '%s' does not appear to exist in configuration" % node0
             )
+        utils.check_qdevice_algorithm_and_running_cluster(
+            utils.getCorosyncConf(), add=False
+        )
         if "--force" not in utils.pcs_options:
             retval, data = utils.get_remote_quorumtool_output(node0)
             if retval != 0:
@@ -1458,6 +1519,30 @@ def cluster_localnode(argv):
         else:
             success = utils.removeNodeFromClusterConf(node)
 
+# The removed node might be present in CIB. If it is, pacemaker will show it
+# as offline, even though it is no longer in the corosync / cman config. We
+# remove the node by running 'crm_node -R <node>' on the node where the
+# remove command was run. This only works if pacemaker is running. If it's
+# not, we need to remove the node manually from the CIB on all nodes.
+        cib_node_remove = None
+        if utils.usefile:
+            cib_node_remove = utils.filename
+        elif not utils.is_service_running(utils.cmd_runner(), "pacemaker"):
+            cib_node_remove = os.path.join(settings.cib_dir, "cib.xml")
+        if cib_node_remove:
+            original_usefile, original_filename = utils.usefile, utils.filename
+            utils.usefile = True
+            utils.filename = cib_node_remove
+            dummy_output, dummy_retval = utils.run([
+                "cibadmin",
+                "--delete-all",
+                "--force",
+                "--xpath=/cib/configuration/nodes/node[@uname='{0}']".format(
+                    node
+                ),
+            ])
+            utils.usefile, utils.filename = original_usefile, original_filename
+
         if success:
             print("%s: successfully removed!" % node)
         else:
@@ -1616,6 +1701,11 @@ def cluster_destroy(argv):
         print("Killing any remaining services...")
         os.system("killall -q -9 corosync aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld")
         utils.disableServices()
+        try:
+            disable_service(utils.cmd_runner(), "sbd")
+        except:
+            # it's not a big deal if sbd disable fails
+            pass
 
         print("Removing all cluster configuration files...")
         if utils.is_rhel6():
@@ -1740,7 +1830,7 @@ def cluster_remote_node(argv):
 
 def cluster_quorum_unblock(argv):
     if len(argv) > 0:
-        usage.cluster(["quorum", "unblock"])
+        usage.quorum(["unblock"])
         sys.exit(1)
 
     if utils.is_rhel6():
@@ -1761,8 +1851,20 @@ def cluster_quorum_unblock(argv):
     )
     if not unjoined_nodes:
         utils.err("no unjoined nodes found")
+    if "--force" not in utils.pcs_options:
+        answer = utils.get_terminal_input(
+            (
+                "WARNING: If node(s) {nodes} are not powered off or they do"
+                + " have access to shared resources, data corruption and/or"
+                + " cluster failure may occur. Are you sure you want to"
+                + " continue? [y/N] "
+            ).format(nodes=", ".join(unjoined_nodes))
+        )
+        if answer.lower() not in ["y", "yes"]:
+            print("Canceled")
+            return
     for node in unjoined_nodes:
-        stonith.stonith_confirm([node])
+        stonith.stonith_confirm([node], skip_question=True)
 
     output, retval = utils.run(
         ["corosync-cmapctl", "-s", "quorum.cancel_wait_for_all", "u8", "1"]
@@ -1777,5 +1879,5 @@ def cluster_quorum_unblock(argv):
         "false" if startup_fencing.lower() != "false" else "true"
     )
     utils.set_cib_property("startup-fencing", startup_fencing)
-    print("Waiting for nodes cancelled")
+    print("Waiting for nodes canceled")
 
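
The environment_file_to_dict helper imported in this file lives in
pcs/lib/tools.py, whose body is not shown in this mail. A plausible sketch,
assuming it parses sysconfig-style KEY=value lines such as those found in
/etc/sysconfig/sbd:

    def environment_file_to_dict(config):
        # skip blanks, comments and lines without '=', keep the rest
        data = {}
        for line in config.splitlines():
            line = line.strip()
            if not line or line.startswith("#") or "=" not in line:
                continue
            name, value = line.split("=", 1)
            data[name.strip()] = value.strip()
        return data

    sample = "# sbd config\nSBD_WATCHDOG_DEV=/dev/watchdog\n"
    print(environment_file_to_dict(sample))
    # {'SBD_WATCHDOG_DEV': '/dev/watchdog'}
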
diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
index 0bc5d48..927df35 100644
--- a/pcs/common/report_codes.py
+++ b/pcs/common/report_codes.py
@@ -12,14 +12,20 @@ FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE"
 FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD"
 FORCE_OPTIONS = "OPTIONS"
 FORCE_QDEVICE_MODEL = "QDEVICE_MODEL"
+FORCE_UNKNOWN_AGENT = "UNKNOWN_AGENT"
+FORCE_UNSUPPORTED_AGENT = "UNSUPPORTED_AGENT"
+FORCE_METADATA_ISSUE = "METADATA_ISSUE"
 SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES"
 
+AGENT_GENERAL_ERROR = "AGENT_GENERAL_ERROR"
+AGENT_NOT_FOUND = "AGENT_NOT_FOUND"
 BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT'
 CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION"
 CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT"
 CIB_LOAD_ERROR = "CIB_LOAD_ERROR"
 CIB_LOAD_ERROR_SCOPE_MISSING = "CIB_LOAD_ERROR_SCOPE_MISSING"
 CIB_PUSH_ERROR = "CIB_PUSH_ERROR"
+CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES = "CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES"
 CMAN_BROADCAST_ALL_RINGS = 'CMAN_BROADCAST_ALL_RINGS'
 CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED'
 CMAN_UNSUPPORTED_COMMAND = "CMAN_UNSUPPORTED_COMMAND"
@@ -45,9 +51,11 @@ INVALID_METADATA_FORMAT = 'INVALID_METADATA_FORMAT'
 INVALID_OPTION = "INVALID_OPTION"
 INVALID_OPTION_VALUE = "INVALID_OPTION_VALUE"
 INVALID_RESOURCE_NAME = 'INVALID_RESOURCE_NAME'
+INVALID_RESPONSE_FORMAT = "INVALID_RESPONSE_FORMAT"
 INVALID_SCORE = "INVALID_SCORE"
 INVALID_TIMEOUT_VALUE = "INVALID_TIMEOUT_VALUE"
 MULTIPLE_SCORE_OPTIONS = "MULTIPLE_SCORE_OPTIONS"
+NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL = "NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL"
 NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR",
 NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED",
 NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED",
@@ -58,12 +66,19 @@ NODE_COMMUNICATION_NOT_CONNECTED = "NODE_COMMUNICATION_NOT_CONNECTED"
 NODE_COMMUNICATION_STARTED = "NODE_COMMUNICATION_STARTED"
 NODE_NOT_FOUND = "NODE_NOT_FOUND"
 NON_UDP_TRANSPORT_ADDR_MISMATCH = 'NON_UDP_TRANSPORT_ADDR_MISMATCH'
+OMITTING_NODE = "OMITTING_NODE"
 PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND"
 PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE",
 PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF",
 PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE",
 QDEVICE_ALREADY_DEFINED = "QDEVICE_ALREADY_DEFINED"
+QDEVICE_ALREADY_INITIALIZED = "QDEVICE_ALREADY_INITIALIZED"
+QDEVICE_DESTROY_ERROR = "QDEVICE_DESTROY_ERROR"
+QDEVICE_DESTROY_SUCCESS = "QDEVICE_DESTROY_SUCCESS"
+QDEVICE_INITIALIZATION_ERROR = "QDEVICE_INITIALIZATION_ERROR"
+QDEVICE_INITIALIZATION_SUCCESS = "QDEVICE_INITIALIZATION_SUCCESS"
 QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED"
+QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED = "QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED"
 REQUIRED_OPTION_IS_MISSING = "REQUIRED_OPTION_IS_MISSING"
 RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR"
 RESOURCE_CLEANUP_TOO_TIME_CONSUMING = 'RESOURCE_CLEANUP_TOO_TIME_CONSUMING'
@@ -76,7 +91,30 @@ RRP_ACTIVE_NOT_SUPPORTED = 'RRP_ACTIVE_NOT_SUPPORTED'
 RUN_EXTERNAL_PROCESS_ERROR = "RUN_EXTERNAL_PROCESS_ERROR"
 RUN_EXTERNAL_PROCESS_FINISHED = "RUN_EXTERNAL_PROCESS_FINISHED"
 RUN_EXTERNAL_PROCESS_STARTED = "RUN_EXTERNAL_PROCESS_STARTED"
+SBD_CHECK_STARTED = "SBD_CHECK_STARTED"
+SBD_CHECK_SUCCESS = "SBD_CHECK_SUCCESS"
+SBD_CONFIG_DISTRIBUTION_STARTED = "SBD_CONFIG_DISTRIBUTION_STARTED"
+SBD_CONFIG_ACCEPTED_BY_NODE = "SBD_CONFIG_ACCEPTED_BY_NODE"
+SBD_DISABLING_STARTED = "SBD_DISABLING_STARTED"
+SBD_ENABLING_STARTED = "SBD_ENABLING_STARTED"
+SBD_NOT_INSTALLED = "SBD_NOT_INSTALLED"
+SBD_NOT_ENABLED = "SBD_NOT_ENABLED"
+SERVICE_DISABLE_ERROR = "SERVICE_DISABLE_ERROR"
+SERVICE_DISABLE_SUCCESS = "SERVICE_DISABLE_SUCCESS"
+SERVICE_ENABLE_ERROR = "SERVICE_ENABLE_ERROR"
+SERVICE_ENABLE_SUCCESS = "SERVICE_ENABLE_SUCCESS"
+SERVICE_KILL_ERROR = "SERVICE_KILL_ERROR"
+SERVICE_KILL_SUCCESS = "SERVICE_KILL_SUCCESS"
+SERVICE_START_ERROR = "SERVICE_START_ERROR"
+SERVICE_START_STARTED = "SERVICE_START_STARTED"
+SERVICE_START_SUCCESS = "SERVICE_START_SUCCESS"
+SERVICE_STOP_ERROR = "SERVICE_STOP_ERROR"
+SERVICE_STOP_STARTED = "SERVICE_STOP_STARTED"
+SERVICE_STOP_SUCCESS = "SERVICE_STOP_SUCCESS"
 UNABLE_TO_GET_AGENT_METADATA = 'UNABLE_TO_GET_AGENT_METADATA'
 UNABLE_TO_READ_COROSYNC_CONFIG = "UNABLE_TO_READ_COROSYNC_CONFIG"
+UNABLE_TO_GET_SBD_CONFIG = "UNABLE_TO_GET_SBD_CONFIG"
+UNABLE_TO_GET_SBD_STATUS = "UNABLE_TO_GET_SBD_STATUS"
 UNKNOWN_COMMAND = 'UNKNOWN_COMMAND'
-UNSUPPORTED_RESOURCE_AGENT = 'UNSUPPORTED_RESOURCE_AGENT'
+UNSUPPORTED_AGENT = 'UNSUPPORTED_AGENT'
+WATCHDOG_NOT_FOUND = "WATCHDOG_NOT_FOUND"
diff --git a/pcs/common/tools.py b/pcs/common/tools.py
index 7c698e8..f4f6c4b 100644
--- a/pcs/common/tools.py
+++ b/pcs/common/tools.py
@@ -5,6 +5,8 @@ from __future__ import (
     unicode_literals,
 )
 
+import threading
+
 
 def simple_cache(func):
     cache = {
@@ -19,3 +21,15 @@ def simple_cache(func):
         return cache["value"]
 
     return wrapper
+
+
+def run_parallel(worker, data_list):
+    thread_list = []
+    for args, kwargs in data_list:
+        thread = threading.Thread(target=worker, args=args, kwargs=kwargs)
+        thread.daemon = True
+        thread_list.append(thread)
+        thread.start()
+
+    for thread in thread_list:
+        thread.join()
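
A usage sketch for run_parallel: each (args, kwargs) pair is dispatched to
the worker in its own daemon thread, and the call returns only after all
threads are joined. The worker and its arguments below are illustrative;
appending to a shared list is safe enough for this demo since list.append
is atomic in CPython:

    from pcs.common.tools import run_parallel

    results = []

    def worker(node, retries=1):
        results.append((node, retries))

    run_parallel(worker, [
        (["node1"], {"retries": 3}),  # (args, kwargs) pairs
        (["node2"], {}),
    ])
    print(sorted(results))  # [('node1', 3), ('node2', 1)]
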
diff --git a/pcs/constraint.py b/pcs/constraint.py
index 95218d0..5d9b0df 100644
--- a/pcs/constraint.py
+++ b/pcs/constraint.py
@@ -92,11 +92,12 @@ def constraint_cmd(argv):
                 "add": ticket_command.add,
                 "show": ticket_command.show,
             }
-            if argv[0] not in command_map:
+            sub_command = argv[0] if argv else "show"
+            if sub_command not in command_map:
                 raise CmdLineInputError()
-            usage_name = "ticket "+argv[0]
+            usage_name = "ticket "+sub_command
 
-            command_map[argv[0]](lib, argv[1:], modificators)
+            command_map[sub_command](lib, argv[1:], modificators)
         except LibraryError as e:
             utils.process_library_reports(e.args)
         except CmdLineInputError as e:
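
The hunk above makes a bare "pcs constraint ticket" default to the "show"
sub-command instead of raising IndexError on argv[0]. The idiom in
isolation (the handlers are placeholders):

    def dispatch(argv):
        command_map = {"add": "add-handler", "show": "show-handler"}
        sub_command = argv[0] if argv else "show"
        if sub_command not in command_map:
            raise ValueError("unknown sub-command")
        return command_map[sub_command]

    print(dispatch([]))       # show-handler
    print(dispatch(["add"]))  # add-handler
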
diff --git a/pcs/lib/cib/constraint/ticket.py b/pcs/lib/cib/constraint/ticket.py
index 6b7cdb2..4154aac 100644
--- a/pcs/lib/cib/constraint/ticket.py
+++ b/pcs/lib/cib/constraint/ticket.py
@@ -52,7 +52,7 @@ def prepare_options_with_set(cib, options, resource_set_list):
         validate_id=partial(tools.check_new_id_applicable, cib, DESCRIPTION),
     )
     report  = _validate_options_common(options)
-    if "ticket" not in options:
+    if "ticket" not in options or not options["ticket"].strip():
         report.append(reports.required_option_is_missing('ticket'))
     if report:
         raise LibraryError(*report)
diff --git a/pcs/lib/cib/resource.py b/pcs/lib/cib/resource.py
index eb368fa..ed692f6 100644
--- a/pcs/lib/cib/resource.py
+++ b/pcs/lib/cib/resource.py
@@ -9,7 +9,7 @@ TAGS_CLONE = "clone", "master"
 TAGS_ALL = TAGS_CLONE + ("primitive", "group")
 
 def find_by_id(tree, id):
-    element = tree.find('.//*[@id="{0}"]'.format(id))
-    if element is None or element.tag not in TAGS_ALL:
-        return None
-    return element
+    for element in tree.findall('.//*[@id="{0}"]'.format(id)):
+        if element is not None and element.tag in TAGS_ALL:
+            return element
+    return None
diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py
index 4f21500..87fd1e5 100644
--- a/pcs/lib/cib/test/test_constraint_ticket.py
+++ b/pcs/lib/cib/test/test_constraint_ticket.py
@@ -222,6 +222,19 @@ class PrepareOptionsWithSetTest(TestCase):
             )
         )
 
+    def test_refuse_empty_ticket(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare({
+                "loss-policy": "stop",
+                "id": "id",
+                "ticket": " "
+            }),
+            (
+                severities.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "ticket"}
+            )
+        )
 
 class Element(object):
     def __init__(self, attrib):
diff --git a/pcs/lib/cib/test/test_resource.py b/pcs/lib/cib/test/test_resource.py
new file mode 100644
index 0000000..ef33ef6
--- /dev/null
+++ b/pcs/lib/cib/test/test_resource.py
@@ -0,0 +1,21 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+from lxml import etree
+from pcs.lib.cib.resource import find_by_id
+
+class FindByIdTest(TestCase):
+    def test_find_correct_tag(self):
+        tree = etree.XML("""
+            <root>
+                <rsc_set id="A" />
+                <primitive id="A" />
+            </root>
+        """)
+        element = find_by_id(tree, "A")
+        self.assertEqual(element.tag, "primitive")
diff --git a/pcs/lib/commands/qdevice.py b/pcs/lib/commands/qdevice.py
new file mode 100644
index 0000000..c300a4c
--- /dev/null
+++ b/pcs/lib/commands/qdevice.py
@@ -0,0 +1,152 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.lib import external, reports
+from pcs.lib.corosync import qdevice_net
+from pcs.lib.errors import LibraryError
+
+
+def qdevice_setup(lib_env, model, enable, start):
+    """
+    Initialize qdevice on local host with specified model
+    string model qdevice model to initialize
+    bool enable make qdevice service start on boot
+    bool start start qdevice now
+    """
+    _ensure_not_cman(lib_env)
+    _check_model(model)
+    qdevice_net.qdevice_setup(lib_env.cmd_runner())
+    lib_env.report_processor.process(
+        reports.qdevice_initialization_success(model)
+    )
+    if enable:
+        _service_enable(lib_env, qdevice_net.qdevice_enable)
+    if start:
+        _service_start(lib_env, qdevice_net.qdevice_start)
+
+def qdevice_destroy(lib_env, model):
+    """
+    Stop and disable qdevice on local host and remove its configuration
+    string model qdevice model to initialize
+    """
+    _ensure_not_cman(lib_env)
+    _check_model(model)
+    _service_stop(lib_env, qdevice_net.qdevice_stop)
+    _service_disable(lib_env, qdevice_net.qdevice_disable)
+    qdevice_net.qdevice_destroy()
+    lib_env.report_processor.process(reports.qdevice_destroy_success(model))
+
+def qdevice_enable(lib_env, model):
+    """
+    make qdevice start automatically on boot on local host
+    """
+    _ensure_not_cman(lib_env)
+    _check_model(model)
+    _service_enable(lib_env, qdevice_net.qdevice_enable)
+
+def qdevice_disable(lib_env, model):
+    """
+    make qdevice not start automatically on boot on local host
+    """
+    _ensure_not_cman(lib_env)
+    _check_model(model)
+    _service_disable(lib_env, qdevice_net.qdevice_disable)
+
+def qdevice_start(lib_env, model):
+    """
+    start qdevice now on local host
+    """
+    _ensure_not_cman(lib_env)
+    _check_model(model)
+    _service_start(lib_env, qdevice_net.qdevice_start)
+
+def qdevice_stop(lib_env, model):
+    """
+    stop qdevice now on local host
+    """
+    _ensure_not_cman(lib_env)
+    _check_model(model)
+    _service_stop(lib_env, qdevice_net.qdevice_stop)
+
+def qdevice_kill(lib_env, model):
+    """
+    kill qdevice now on local host
+    """
+    _ensure_not_cman(lib_env)
+    _check_model(model)
+    _service_kill(lib_env, qdevice_net.qdevice_kill)
+
+def _ensure_not_cman(lib_env):
+    if lib_env.is_cman_cluster:
+        raise LibraryError(reports.cman_unsupported_command())
+
+def _check_model(model):
+    if model != "net":
+        raise LibraryError(
+            reports.invalid_option_value("model", model, ["net"])
+        )
+
+def _service_start(lib_env, func):
+    lib_env.report_processor.process(
+        reports.service_start_started("quorum device")
+    )
+    try:
+        func(lib_env.cmd_runner())
+    except external.StartServiceError as e:
+        raise LibraryError(
+            reports.service_start_error(e.service, e.message)
+        )
+    lib_env.report_processor.process(
+        reports.service_start_success("quorum device")
+    )
+
+def _service_stop(lib_env, func):
+    lib_env.report_processor.process(
+        reports.service_stop_started("quorum device")
+    )
+    try:
+        func(lib_env.cmd_runner())
+    except external.StopServiceError as e:
+        raise LibraryError(
+            reports.service_stop_error(e.service, e.message)
+        )
+    lib_env.report_processor.process(
+        reports.service_stop_success("quorum device")
+    )
+
+def _service_kill(lib_env, func):
+    try:
+        func(lib_env.cmd_runner())
+    except external.KillServicesError as e:
+        raise LibraryError(
+            reports.service_kill_error(e.service, e.message)
+        )
+    lib_env.report_processor.process(
+        reports.service_kill_success(["quorum device"])
+    )
+
+def _service_enable(lib_env, func):
+    try:
+        func(lib_env.cmd_runner())
+    except external.EnableServiceError as e:
+        raise LibraryError(
+            reports.service_enable_error(e.service, e.message)
+        )
+    lib_env.report_processor.process(
+        reports.service_enable_success("quorum device")
+    )
+
+def _service_disable(lib_env, func):
+    try:
+        func(lib_env.cmd_runner())
+    except external.DisableServiceError as e:
+        raise LibraryError(
+            reports.service_disable_error(e.service, e.message)
+        )
+    lib_env.report_processor.process(
+        reports.service_disable_success("quorum device")
+    )
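
A standalone sketch of the wrapping pattern _service_start and its siblings
follow: the command layer owns the progress reports and error translation,
while the injected function (qdevice_net.qdevice_start and friends) does the
actual work. All names below are stand-ins:

    class StartServiceError(Exception):
        def __init__(self, service, message):
            self.service = service
            self.message = message

    def service_start(runner, func):
        print("Starting quorum device...")
        try:
            func(runner)
        except StartServiceError as e:
            raise SystemExit(
                "Unable to start {0}: {1}".format(e.service, e.message)
            )
        print("quorum device started")

    service_start(None, lambda runner: None)  # prints both messages
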
diff --git a/pcs/lib/commands/sbd.py b/pcs/lib/commands/sbd.py
new file mode 100644
index 0000000..875758f
--- /dev/null
+++ b/pcs/lib/commands/sbd.py
@@ -0,0 +1,355 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import json
+
+from pcs import settings
+from pcs.common import (
+    tools,
+    report_codes,
+)
+from pcs.lib import (
+    sbd,
+    reports,
+    nodes_task,
+)
+from pcs.lib.tools import environment_file_to_dict
+from pcs.lib.errors import (
+    LibraryError,
+    ReportItemSeverity as Severities
+)
+from pcs.lib.external import (
+    node_communicator_exception_to_report_item,
+    NodeCommunicationException,
+    NodeConnectionException,
+    NodeCommandUnsuccessfulException,
+)
+from pcs.lib.node import (
+    NodeAddressesList,
+    NodeNotFound
+)
+
+
+def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
+    """
+    Validate user SBD configuration. Options 'SBD_WATCHDOG_DEV' and 'SBD_OPTS'
+    are restricted. Returns list of ReportItem
+
+    sbd_config -- dictionary in format: <SBD config option>: <value>
+    allow_unknown_opts -- if True, also accept unknown options.
+    """
+
+    report_item_list = []
+    unsupported_sbd_option_list = ["SBD_WATCHDOG_DEV", "SBD_OPTS"]
+    allowed_sbd_options = [
+        "SBD_DELAY_START", "SBD_STARTMODE", "SBD_WATCHDOG_TIMEOUT"
+    ]
+    for sbd_opt in sbd_config:
+        if sbd_opt in unsupported_sbd_option_list:
+            report_item_list.append(reports.invalid_option(
+                sbd_opt, allowed_sbd_options, None
+            ))
+
+        elif sbd_opt not in allowed_sbd_options:
+            report_item_list.append(reports.invalid_option(
+                sbd_opt,
+                allowed_sbd_options,
+                None,
+                Severities.WARNING if allow_unknown_opts else Severities.ERROR,
+                None if allow_unknown_opts else report_codes.FORCE_OPTIONS
+            ))
+
+    return report_item_list
+
+
+def _get_full_watchdog_list(node_list, default_watchdog, watchdog_dict):
+    """
+    Validate that all nodes in watchdog_dict exist and return a dictionary
+    where keys are nodes and values are the corresponding watchdogs.
+    Raises LibraryError if any of the nodes doesn't belong to the cluster.
+
+    node_list -- NodeAddressesList
+    default_watchdog -- watchdog for nodes which are not specified
+        in watchdog_dict
+    watchdog_dict -- dictionary with node names as keys and value as watchdog
+    """
+    full_dict = dict([(node, default_watchdog) for node in node_list])
+    report_item_list = []
+
+    for node_name, watchdog in watchdog_dict.items():
+        try:
+            full_dict[node_list.find_by_label(node_name)] = watchdog
+        except NodeNotFound:
+            report_item_list.append(reports.node_not_found(node_name))
+
+    if report_item_list:
+        raise LibraryError(*report_item_list)
+
+    return full_dict
+
+
+def enable_sbd(
+        lib_env, default_watchdog, watchdog_dict, sbd_options,
+        allow_unknown_opts=False, ignore_offline_nodes=False
+):
+    """
+    Enable SBD on all nodes in cluster.
+
+    lib_env -- LibraryEnvironment
+    default_watchdog -- watchdog for nodes which are not specified in
+        watchdog_dict. Uses default value from settings if None.
+    watchdog_dict -- dictionary with NodeAddresses as keys and watchdog path
+        as value
+    sbd_options -- dictionary in format: <SBD config option>: <value>
+    allow_unknown_opts -- if True, also accept unknown options.
+    ignore_offline_nodes -- if True, omit offline nodes
+    """
+    __ensure_not_cman(lib_env)
+
+    node_list = _get_cluster_nodes(lib_env)
+
+    if not default_watchdog:
+        default_watchdog = settings.sbd_watchdog_default
+
+    # input validation begin
+    full_watchdog_dict = _get_full_watchdog_list(
+        node_list, default_watchdog, watchdog_dict
+    )
+
+    # config validation
+    sbd_options = dict([(opt.upper(), val) for opt, val in sbd_options.items()])
+    lib_env.report_processor.process_list(
+        _validate_sbd_options(sbd_options, allow_unknown_opts)
+    )
+
+    # check nodes status
+    online_nodes = _get_online_nodes(lib_env, node_list, ignore_offline_nodes)
+    for node in list(full_watchdog_dict):
+        if node not in online_nodes:
+            full_watchdog_dict.pop(node, None)
+    # input validation end
+
+    # check if SBD can be enabled
+    sbd.check_sbd_on_all_nodes(
+        lib_env.report_processor,
+        lib_env.node_communicator(),
+        full_watchdog_dict
+    )
+
+    # distribute SBD configuration
+    config = sbd.get_default_sbd_config()
+    config.update(sbd_options)
+    sbd.set_sbd_config_on_all_nodes(
+        lib_env.report_processor,
+        lib_env.node_communicator(),
+        online_nodes,
+        config
+    )
+
+    # remove cluster prop 'stonith_watchdog_timeout'
+    sbd.remove_stonith_watchdog_timeout_on_all_nodes(
+        lib_env.node_communicator(), online_nodes
+    )
+
+    # enable SBD service on all nodes
+    sbd.enable_sbd_service_on_all_nodes(
+        lib_env.report_processor, lib_env.node_communicator(), online_nodes
+    )
+
+    lib_env.report_processor.process(
+        reports.cluster_restart_required_to_apply_changes()
+    )
+
+
+def disable_sbd(lib_env, ignore_offline_nodes=False):
+    """
+    Disable SBD on all nodes in cluster.
+
+    lib_env -- LibraryEnvironment
+    ignore_offline_nodes -- if True, omit offline nodes
+    """
+    __ensure_not_cman(lib_env)
+
+    node_list = _get_online_nodes(
+        lib_env, _get_cluster_nodes(lib_env), ignore_offline_nodes
+    )
+
+    sbd.set_stonith_watchdog_timeout_to_zero_on_all_nodes(
+        lib_env.node_communicator(), node_list
+    )
+    sbd.disable_sbd_service_on_all_nodes(
+        lib_env.report_processor,
+        lib_env.node_communicator(),
+        node_list
+    )
+
+    lib_env.report_processor.process(
+        reports.cluster_restart_required_to_apply_changes()
+    )
+
+
+def _get_online_nodes(lib_env, node_list, ignore_offline_nodes=False):
+    """
+    Returns NodeAddressesList of online nodes.
+    Raises LibraryError on any failure.
+
+    lib_env -- LibraryEnvironment
+    node_list -- NodeAddressesList
+    ignore_offline_nodes -- if True offline nodes are just omitted from
+        returned list.
+    """
+    to_raise = []
+    online_node_list = NodeAddressesList()
+
+    def is_node_online(node):
+        try:
+            nodes_task.node_check_auth(lib_env.node_communicator(), node)
+            online_node_list.append(node)
+        except NodeConnectionException as e:
+            if ignore_offline_nodes:
+                to_raise.append(reports.omitting_node(node.label))
+            else:
+                to_raise.append(node_communicator_exception_to_report_item(
+                    e, Severities.ERROR, report_codes.SKIP_OFFLINE_NODES
+                ))
+        except NodeCommunicationException as e:
+            to_raise.append(node_communicator_exception_to_report_item(e))
+
+    tools.run_parallel(is_node_online, [([node], {}) for node in node_list])
+
+    lib_env.report_processor.process_list(to_raise)
+    return online_node_list
+
+
+def get_cluster_sbd_status(lib_env):
+    """
+    Returns the status of the SBD service in the cluster as a list in the
+    format:
+    [
+        {
+            "node": <NodeAddress>,
+            "status": {
+                "installed": <boolean>,
+                "enabled": <boolean>,
+                "running": <boolean>
+            }
+        },
+        ...
+    ]
+
+    lib_env -- LibraryEnvironment
+    """
+    __ensure_not_cman(lib_env)
+
+    node_list = _get_cluster_nodes(lib_env)
+    report_item_list = []
+    successful_node_list = []
+    status_list = []
+
+    def get_sbd_status(node):
+        try:
+            status_list.append({
+                "node": node,
+                "status": json.loads(
+                    sbd.check_sbd(lib_env.node_communicator(), node, "")
+                )["sbd"]
+            })
+            successful_node_list.append(node)
+        except NodeCommunicationException as e:
+            report_item_list.append(reports.unable_to_get_sbd_status(
+                node.label,
+                node_communicator_exception_to_report_item(e).message
+            ))
+        except (ValueError, KeyError) as e:
+            report_item_list.append(reports.unable_to_get_sbd_status(
+                node.label, str(e)
+            ))
+
+    tools.run_parallel(get_sbd_status, [([node], {}) for node in node_list])
+    lib_env.report_processor.process_list(report_item_list)
+
+    for node in node_list:
+        if node not in successful_node_list:
+            status_list.append({
+                "node": node,
+                "status": {
+                    "installed": None,
+                    "enabled": None,
+                    "running": None
+                }
+            })
+    return status_list
+
+
+def get_cluster_sbd_config(lib_env):
+    """
+    Returns a list of SBD configs from all nodes in the cluster. Structure
+    of the data:
+    [
+        {
+            "node": <NodeAddress>,
+            "config": <sbd_config_dict> or None if there was a failure,
+        },
+        ...
+    ]
+    If an error occurs while obtaining the config from a node, its config
+    will be None. If obtaining the config fails on all nodes, an empty list
+    is returned.
+
+    lib_env -- LibraryEnvironment
+    """
+    __ensure_not_cman(lib_env)
+
+    node_list = _get_cluster_nodes(lib_env)
+    config_list = []
+    successful_node_list = []
+    report_item_list = []
+
+    def get_sbd_config(node):
+        try:
+            config_list.append({
+                "node": node,
+                "config": environment_file_to_dict(
+                    sbd.get_sbd_config(lib_env.node_communicator(), node)
+                )
+            })
+            successful_node_list.append(node)
+        except NodeCommandUnsuccessfulException as e:
+            report_item_list.append(reports.unable_to_get_sbd_config(
+                node.label,
+                e.reason,
+                Severities.WARNING
+            ))
+        except NodeCommunicationException as e:
+            report_item_list.append(reports.unable_to_get_sbd_config(
+                node.label,
+                node_communicator_exception_to_report_item(e).message,
+                Severities.WARNING
+            ))
+
+    tools.run_parallel(get_sbd_config, [([node], {}) for node in node_list])
+    lib_env.report_processor.process_list(report_item_list)
+
+    if not len(config_list):
+        return []
+
+    for node in node_list:
+        if node not in successful_node_list:
+            config_list.append({
+                "node": node,
+                "config": None
+            })
+    return config_list
+
+
+def get_local_sbd_config(lib_env):
+    __ensure_not_cman(lib_env)
+    return environment_file_to_dict(sbd.get_local_sbd_config())
+
+
+def _get_cluster_nodes(lib_env):
+    return lib_env.get_corosync_conf().get_nodes()
+
+
+def __ensure_not_cman(lib_env):
+    if lib_env.is_cman_cluster:
+        raise LibraryError(reports.cman_unsupported_command())
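
A standalone sketch of the merge performed by _get_full_watchdog_list: every
node starts with the default watchdog, watchdog_dict overrides individual
nodes, and unknown node names are rejected. Plain strings stand in for
NodeAddresses here:

    node_list = ["node1", "node2", "node3"]
    default_watchdog = "/dev/watchdog"
    watchdog_dict = {"node2": "/dev/watchdog1"}

    full_dict = dict((node, default_watchdog) for node in node_list)
    for node_name, watchdog in watchdog_dict.items():
        if node_name not in full_dict:
            raise SystemExit("Node '%s' does not appear to exist" % node_name)
        full_dict[node_name] = watchdog

    print(full_dict["node2"])  # /dev/watchdog1
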
diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py
index ff8d33b..5a486ca 100644
--- a/pcs/lib/corosync/config_facade.py
+++ b/pcs/lib/corosync/config_facade.py
@@ -329,9 +329,8 @@ class ConfigFacade(object):
     def __validate_quorum_device_model_net_options(
         self, model_options, need_required, force=False
     ):
-        required_options = frozenset(["host"])
+        required_options = frozenset(["host", "algorithm"])
         optional_options = frozenset([
-            "algorithm",
             "connect_timeout",
             "force_ip_version",
             "port",
diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py
new file mode 100644
index 0000000..7479257
--- /dev/null
+++ b/pcs/lib/corosync/qdevice_net.py
@@ -0,0 +1,74 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os.path
+import shutil
+
+from pcs import settings
+from pcs.lib import external, reports
+from pcs.lib.errors import LibraryError
+
+
+__model = "net"
+__service_name = "corosync-qnetd"
+
+def qdevice_setup(runner):
+    """
+    initialize qdevice on local host
+    """
+    if external.is_dir_nonempty(settings.corosync_qdevice_net_server_certs_dir):
+        raise LibraryError(reports.qdevice_already_initialized(__model))
+
+    output, retval = runner.run([
+        os.path.join(settings.corosync_binaries, "corosync-qnetd-certutil"),
+        "-i"
+    ])
+    if retval != 0:
+        raise LibraryError(
+            reports.qdevice_initialization_error(__model, output.rstrip())
+        )
+
+def qdevice_destroy():
+    """
+    delete qdevice configuration on local host
+    """
+    try:
+        shutil.rmtree(settings.corosync_qdevice_net_server_certs_dir)
+    except EnvironmentError as e:
+        raise LibraryError(
+            reports.qdevice_destroy_error(__model, e.strerror)
+        )
+
+def qdevice_enable(runner):
+    """
+    make qdevice start automatically on boot on local host
+    """
+    external.enable_service(runner, __service_name)
+
+def qdevice_disable(runner):
+    """
+    make qdevice not start automatically on boot on local host
+    """
+    external.disable_service(runner, __service_name)
+
+def qdevice_start(runner):
+    """
+    start qdevice now on local host
+    """
+    external.start_service(runner, __service_name)
+
+def qdevice_stop(runner):
+    """
+    stop qdevice now on local host
+    """
+    external.stop_service(runner, __service_name)
+
+def qdevice_kill(runner):
+    """
+    kill qdevice now on local host
+    """
+    external.kill_services(runner, [__service_name])
diff --git a/pcs/lib/external.py b/pcs/lib/external.py
index 231cd9c..34426f9 100644
--- a/pcs/lib/external.py
+++ b/pcs/lib/external.py
@@ -49,13 +49,236 @@ except ImportError:
 
 from pcs.lib import reports
 from pcs.lib.errors import LibraryError, ReportItemSeverity
+from pcs.common.tools import simple_cache
 from pcs import settings
 
 
+class ManageServiceError(Exception):
+    #pylint: disable=super-init-not-called
+    def __init__(self, service, message=None):
+        self.service = service
+        self.message = message
+
+class DisableServiceError(ManageServiceError):
+    pass
+
+class EnableServiceError(ManageServiceError):
+    pass
+
+class StartServiceError(ManageServiceError):
+    pass
+
+class StopServiceError(ManageServiceError):
+    pass
+
+class KillServicesError(ManageServiceError):
+    pass
+
+
 def is_path_runnable(path):
     return os.path.isfile(path) and os.access(path, os.X_OK)
 
 
+def is_dir_nonempty(path):
+    if not os.path.exists(path):
+        return False
+    if not os.path.isdir(path):
+        return True
+    return len(os.listdir(path)) > 0
+
+
+@simple_cache
+def is_systemctl():
+    """
+    Check whether the local system is running systemd.
+    Returns True if current system is systemctl compatible, False otherwise.
+    """
+    systemctl_paths = [
+        '/usr/bin/systemctl',
+        '/bin/systemctl',
+        '/var/run/systemd/system',
+    ]
+    for path in systemctl_paths:
+        if os.path.exists(path):
+            return True
+    return False
+
+
+def disable_service(runner, service):
+    """
+    Disable specified service in local system.
+    Raise DisableServiceError or LibraryError on failure.
+
+    runner -- CommandRunner
+    service -- name of service
+    """
+    if is_systemctl():
+        output, retval = runner.run([
+            "systemctl", "disable", service + ".service"
+        ])
+    else:
+        if not is_service_installed(runner, service):
+            return
+        output, retval = runner.run(["chkconfig", service, "off"])
+    if retval != 0:
+        raise DisableServiceError(service, output.rstrip())
+
+
+def enable_service(runner, service):
+    """
+    Enable specified service in local system.
+    Raise EnableServiceError or LibraryError on failure.
+
+    runner -- CommandRunner
+    service -- name of service
+    """
+    if is_systemctl():
+        output, retval = runner.run([
+            "systemctl", "enable", service + ".service"
+        ])
+    else:
+        output, retval = runner.run(["chkconfig", service, "on"])
+    if retval != 0:
+        raise EnableServiceError(service, output.rstrip())
+
+
+def start_service(runner, service):
+    """
+    Start specified service in local system
+    CommandRunner runner
+    string service service name
+    """
+    if is_systemctl():
+        output, retval = runner.run([
+            "systemctl", "start", "{0}.service".format(service)
+        ])
+    else:
+        output, retval = runner.run(["service", service, "start"])
+    if retval != 0:
+        raise StartServiceError(service, output.rstrip())
+
+
+def stop_service(runner, service):
+    """
+    Stop specified service in local system
+    CommandRunner runner
+    string service service name
+    """
+    if is_systemctl():
+        output, retval = runner.run([
+            "systemctl", "stop", "{0}.service".format(service)
+        ])
+    else:
+        output, retval = runner.run(["service", service, "stop"])
+    if retval != 0:
+        raise StopServiceError(service, output.rstrip())
+
+
+def kill_services(runner, services):
+    """
+    Kill specified services in local system
+    CommandRunner runner
+    iterable services service names
+    """
+    # make killall not report that a process is not running
+    output, retval = runner.run(
+        ["killall", "--quiet", "--signal", "9", "--"] + list(services)
+    )
+    # If a process isn't running, killall will still return 1 even with --quiet.
+    # We don't consider that an error, so we check for output string as well.
+    # If it's empty, no actual error happened.
+    if retval != 0:
+        if output.strip():
+            raise KillServicesError(list(services), output.rstrip())
+
+
+def is_service_enabled(runner, service):
+    """
+    Check if specified service is enabled in local system.
+
+    runner -- CommandRunner
+    service -- name of service
+    """
+    if is_systemctl():
+        _, retval = runner.run(
+            ["systemctl", "is-enabled", service + ".service"]
+        )
+    else:
+        _, retval = runner.run(["chkconfig", service])
+
+    return retval == 0
+
+
+def is_service_running(runner, service):
+    """
+    Check if specified service is currently running on local system.
+
+    runner -- CommandRunner
+    service -- name of service
+    """
+    if is_systemctl():
+        _, retval = runner.run(["systemctl", "is-active", service + ".service"])
+    else:
+        _, retval = runner.run(["service", service, "status"])
+
+    return retval == 0
+
+
+def is_service_installed(runner, service):
+    """
+    Check if specified service is installed on local system.
+
+    runner -- CommandRunner
+    service -- name of service
+    """
+    if is_systemctl():
+        return service in get_systemd_services(runner)
+    else:
+        return service in get_non_systemd_services(runner)
+
+
+def get_non_systemd_services(runner):
+    """
+    Returns a list of all installed services on a non-systemd system.
+
+    runner -- CommandRunner
+    """
+    if is_systemctl():
+        return []
+
+    output, return_code = runner.run(["chkconfig"], ignore_stderr=True)
+    if return_code != 0:
+        return []
+
+    service_list = []
+    for service in output.splitlines():
+        service = service.split(" ", 1)[0]
+        if service:
+            service_list.append(service)
+    return service_list
+
+
+def get_systemd_services(runner):
+    """
+    Returns a list of all systemd services installed on the local system.
+
+    runner -- CommandRunner
+    """
+    if not is_systemctl():
+        return []
+
+    output, return_code = runner.run(["systemctl", "list-unit-files", "--full"])
+    if return_code != 0:
+        return []
+
+    service_list = []
+    for service in output.splitlines():
+        match = re.search(r'^([\S]*)\.service', service)
+        if match:
+            service_list.append(match.group(1))
+    return service_list
+
+
 def is_cman_cluster(runner):
     """
     Detect if underlying locally installed cluster is CMAN based
@@ -155,6 +378,8 @@ class NodeAuthenticationException(NodeCommunicationException):
 class NodePermissionDeniedException(NodeCommunicationException):
     pass
 
+class NodeCommandUnsuccessfulException(NodeCommunicationException):
+    pass
 
 class NodeUnsupportedCommandException(NodeCommunicationException):
     pass
@@ -166,6 +391,12 @@ def node_communicator_exception_to_report_item(
     """
     Transform NodeCommunicationException to ReportItem
     """
+    if isinstance(e, NodeCommandUnsuccessfulException):
+        return reports.node_communication_command_unsuccessful(
+            e.node,
+            e.command,
+            e.reason
+        )
     exception_to_report = {
         NodeAuthenticationException:
             reports.node_communication_error_not_authorized,
@@ -285,7 +516,14 @@ class NodeCommunicator(object):
             self._reporter.process(
                 reports.node_communication_finished(url, e.code, response_data)
             )
-            if e.code == 401:
+            if e.code == 400:
+                # old pcsd protocol: error messages are commonly passed in plain
+                # text in response body with HTTP code 400
+                # we need to be backward compatible with that
+                raise NodeCommandUnsuccessfulException(
+                    host, request, response_data
+                )
+            elif e.code == 401:
                 raise NodeAuthenticationException(
                     host, request, "HTTP error: {0}".format(e.code)
                 )
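
All of the service helpers added above share the same systemd / SysV split.
A minimal sketch of that branching; is_systemctl probes the same three paths
as the diff, and the commands match the ones run above:

    import os

    def is_systemctl():
        paths = (
            "/usr/bin/systemctl",
            "/bin/systemctl",
            "/var/run/systemd/system",
        )
        return any(os.path.exists(path) for path in paths)

    def enable_command(service):
        if is_systemctl():
            return ["systemctl", "enable", service + ".service"]
        return ["chkconfig", service, "on"]

    print(enable_command("sbd"))
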
diff --git a/pcs/lib/node.py b/pcs/lib/node.py
index 0ddd405..f3bfe57 100644
--- a/pcs/lib/node.py
+++ b/pcs/lib/node.py
@@ -6,12 +6,44 @@ from __future__ import (
 )
 
 
+class NodeNotFound(Exception):
+    pass
+
+
 class NodeAddresses(object):
     def __init__(self, ring0, ring1=None, name=None, id=None):
-        self.ring0 = ring0
-        self.ring1 = ring1
-        self.name = name
-        self.id = id
+        self._ring0 = ring0
+        self._ring1 = ring1
+        self._name = name
+        self._id = id
+
+    def __hash__(self):
+        return hash(self.label)
+
+    def __eq__(self, other):
+        return self.label == other.label
+
+    def __ne__(self, other):
+        return not (self == other)
+
+    def __lt__(self, other):
+        return self.label < other.label
+
+    @property
+    def ring0(self):
+        return self._ring0
+
+    @property
+    def ring1(self):
+        return self._ring1
+
+    @property
+    def name(self):
+        return self._name
+
+    @property
+    def id(self):
+        return self._id
 
     @property
     def label(self):
@@ -38,3 +70,9 @@ class NodeAddressesList(object):
 
     def __reversed__(self):
         return self._list.__reversed__()
+
+    def find_by_label(self, label):
+        for node in self._list:
+            if node.label == label:
+                return node
+        raise NodeNotFound()
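
Why NodeAddresses gained __hash__, __eq__ and friends: the new SBD code keys
dictionaries and builds sets by node. A standalone mini-version of the idea,
assuming label falls back from name to ring0 as the existing pcs property
(left unchanged by this hunk) does:

    class Node(object):
        def __init__(self, ring0, name=None):
            self.ring0 = ring0
            self.name = name

        @property
        def label(self):
            return self.name if self.name else self.ring0

        def __hash__(self):
            return hash(self.label)

        def __eq__(self, other):
            return self.label == other.label

    watchdogs = {Node("10.0.0.1", name="node1"): "/dev/watchdog"}
    print(Node("10.0.0.2", name="node1") in watchdogs)  # True: keyed by label
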
diff --git a/pcs/lib/nodes_task.py b/pcs/lib/nodes_task.py
index d714d70..b9a61f6 100644
--- a/pcs/lib/nodes_task.py
+++ b/pcs/lib/nodes_task.py
@@ -11,6 +11,7 @@ from pcs.common import report_codes
 from pcs.lib import reports
 from pcs.lib.errors import ReportItemSeverity
 from pcs.lib.external import (
+    NodeCommunicator,
     NodeCommunicationException,
     node_communicator_exception_to_report_item,
 )
@@ -115,3 +116,17 @@ def check_corosync_offline_on_nodes(
                 )
             )
     reporter.process_list(report_items)
+
+
+def node_check_auth(communicator, node):
+    """
+    Check authentication and online status of 'node'.
+
+    communicator -- NodeCommunicator
+    node -- NodeAddresses
+    """
+    communicator.call_node(
+        node,
+        "remote/check_auth",
+        NodeCommunicator.format_data_dict({"check_auth_only": 1})
+    )
diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
index e54bce8..4f4f580 100644
--- a/pcs/lib/reports.py
+++ b/pcs/lib/reports.py
@@ -411,6 +411,22 @@ def node_communication_error_unsupported_command(
         forceable=forceable
     )
 
+def node_communication_command_unsuccessful(node, command, reason):
+    """
+    node rejected a request for another reason with a plain text explanation
+    node string node address / name
+    command string command which failed
+    reason string description of the error
+    """
+    return ReportItem.error(
+        report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+        "{node}: {reason}",
+        info={
+            "node": node,
+            "command": command,
+            "reason": reason,
+        }
+    )
+
 def node_communication_error_other_error(
     node, command, reason,
     severity=ReportItemSeverity.ERROR, forceable=None
@@ -616,6 +632,84 @@ def qdevice_not_defined():
         "no quorum device is defined in this cluster"
     )
 
+def qdevice_remove_or_cluster_stop_needed():
+    """
+    operation cannot be executed, qdevice removal or cluster stop is needed
+    """
+    return ReportItem.error(
+        report_codes.QDEVICE_REMOVE_OR_CLUSTER_STOP_NEEDED,
+        "You need to stop the cluster or remove qdevice from cluster to continue"
+    )
+
+def qdevice_already_initialized(model):
+    """
+    cannot create qdevice on local host, it has already been created
+    string model qdevice model
+    """
+    return ReportItem.error(
+        report_codes.QDEVICE_ALREADY_INITIALIZED,
+        "Quorum device '{model}' has been already initialized",
+        info={
+            "model": model,
+        }
+    )
+
+def qdevice_initialization_success(model):
+    """
+    qdevice was successfully initialized on local host
+    string model qdevice model
+    """
+    return ReportItem.info(
+        report_codes.QDEVICE_INITIALIZATION_SUCCESS,
+        "Quorum device '{model}' initialized",
+        info={
+            "model": model,
+        }
+    )
+
+def qdevice_initialization_error(model, reason):
+    """
+    an error occurred when creating qdevice on local host
+    string model qdevice model
+    string reason an error message
+    """
+    return ReportItem.error(
+        report_codes.QDEVICE_INITIALIZATION_ERROR,
+        "Unable to initialize quorum device '{model}': {reason}",
+        info={
+            "model": model,
+            "reason": reason,
+        }
+    )
+
+def qdevice_destroy_success(model):
+    """
+    qdevice configuration successfully removed from local host
+    string model qdevice model
+    """
+    return ReportItem.info(
+        report_codes.QDEVICE_DESTROY_SUCCESS,
+        "Quorum device '{model}' configuration files removed",
+        info={
+            "model": model,
+        }
+    )
+
+def qdevice_destroy_error(model, reason):
+    """
+    an error occurred when removing qdevice configuration from local host
+    string model qdevice model
+    string reason an error message
+    """
+    return ReportItem.error(
+        report_codes.QDEVICE_DESTROY_ERROR,
+        "Unable to destroy quorum device '{model}': {reason}",
+        info={
+            "model": model,
+            "reason": reason,
+        }
+    )
+
 def cman_unsupported_command():
     """
     requested library command is not available as local cluster is CMAN based
@@ -914,3 +1008,431 @@ def cman_broadcast_all_rings():
         "Enabling broadcast for all rings as CMAN does not support "
             + "broadcast in only one ring"
     )
+
+def service_start_started(service):
+    """
+    system service is being started
+    string service service name or description
+    """
+    return ReportItem.info(
+        report_codes.SERVICE_START_STARTED,
+        "Starting {service}...",
+        info={
+            "service": service,
+        }
+    )
+
+def service_start_error(service, reason):
+    """
+    system service start failed
+    string service service name or description
+    string reason error message
+    """
+    return ReportItem.error(
+        report_codes.SERVICE_START_ERROR,
+        "Unable to start {service}: {reason}",
+        info={
+            "service": service,
+            "reason": reason,
+        }
+    )
+
+def service_start_success(service):
+    """
+    system service was started successfully
+    string service service name or description
+    """
+    return ReportItem.info(
+        report_codes.SERVICE_START_SUCCESS,
+        "{service} started",
+        info={
+            "service": service,
+        }
+    )
+
+def service_stop_started(service):
+    """
+    system service is being stopped
+    string service service name or description
+    """
+    return ReportItem.info(
+        report_codes.SERVICE_STOP_STARTED,
+        "Stopping {service}...",
+        info={
+            "service": service,
+        }
+    )
+
+def service_stop_error(service, reason):
+    """
+    system service stop failed
+    string service service name or description
+    string reason error message
+    """
+    return ReportItem.error(
+        report_codes.SERVICE_STOP_ERROR,
+        "Unable to stop {service}: {reason}",
+        info={
+            "service": service,
+            "reason": reason,
+        }
+    )
+
+def service_stop_success(service):
+    """
+    system service was stopped successfully
+    string service service name or description
+    """
+    return ReportItem.info(
+        report_codes.SERVICE_STOP_SUCCESS,
+        "{service} stopped",
+        info={
+            "service": service,
+        }
+    )
+
+def service_kill_error(services, reason):
+    """
+    system services kill failed
+    iterable services service names or descriptions
+    string reason error message
+    """
+    return ReportItem.error(
+        report_codes.SERVICE_KILL_ERROR,
+        "Unable to kill {services_str}: {reason}",
+        info={
+            "services": services,
+            "services_str": ", ".join(services),
+            "reason": reason,
+        }
+    )
+
+def service_kill_success(services):
+    """
+    system services were killed successfully
+    iterable services service names or descriptions
+    """
+    return ReportItem.info(
+        report_codes.SERVICE_KILL_SUCCESS,
+        "{services_str} killed",
+        info={
+            "services": services,
+            "services_str": ", ".join(services),
+        }
+    )
+
+def service_enable_error(service, reason, node=None):
+    """
+    system service enable failed
+    string service service name or description
+    string reason error message
+    string node node on which service was enabled
+    """
+    msg = "Unable to enable {service}: {reason}"
+    return ReportItem.error(
+        report_codes.SERVICE_ENABLE_ERROR,
+        msg if node is None else "{node}: " + msg,
+        info={
+            "service": service,
+            "reason": reason,
+            "node": node,
+        }
+    )
+
+def service_enable_success(service, node=None):
+    """
+    system service was enabled successfully
+    string service service name or description
+    string node node on which service was enabled
+    """
+    msg = "{service} enabled"
+    return ReportItem.info(
+        report_codes.SERVICE_ENABLE_SUCCESS,
+        msg if node is None else "{node}: " + msg,
+        info={
+            "service": service,
+            "node": node,
+        }
+    )
+
+def service_disable_error(service, reason, node=None):
+    """
+    system service disable failed
+    string service service name or description
+    string reason error message
+    string node node on which service was disabled
+    """
+    msg = "Unable to disable {service}: {reason}"
+    return ReportItem.error(
+        report_codes.SERVICE_DISABLE_ERROR,
+        msg if node is None else "{node}: " + msg,
+        info={
+            "service": service,
+            "reason": reason,
+            "node": node,
+        }
+    )
+
+def service_disable_success(service, node=None):
+    """
+    system service was disabled successfully
+    string service service name or description
+    string node node on which service was disabled
+    """
+    msg = "{service} disabled"
+    return ReportItem.info(
+        report_codes.SERVICE_DISABLE_SUCCESS,
+        msg if node is None else "{node}: " + msg,
+        info={
+            "service": service,
+            "node": node,
+        }
+    )
+
+
+def invalid_metadata_format(severity=ReportItemSeverity.ERROR, forceable=None):
+    """
+    Invalid format of metadata
+    """
+    return ReportItem(
+        report_codes.INVALID_METADATA_FORMAT,
+        severity,
+        "Invalid metadata format",
+        forceable=forceable
+    )
+
+
+def unable_to_get_agent_metadata(
+    agent, reason, severity=ReportItemSeverity.ERROR, forceable=None
+):
+    """
+    There was a problem getting the metadata of an agent
+
+    agent -- agent whose metadata could not be obtained
+    reason -- reason of failure
+    """
+    return ReportItem(
+        report_codes.UNABLE_TO_GET_AGENT_METADATA,
+        severity,
+        "Unable to get metadata of '{agent}': {reason}",
+        info={
+            "agent": agent,
+            "reason": reason
+        },
+        forceable=forceable
+    )
+
+
+def agent_not_found(agent, severity=ReportItemSeverity.ERROR, forceable=None):
+    """
+    Specified agent doesn't exist
+
+    agent -- name of agent which doesn't exist
+    """
+    return ReportItem(
+        report_codes.AGENT_NOT_FOUND,
+        severity,
+        "Agent '{agent}' not found",
+        info={"agent": agent},
+        forceable=forceable
+    )
+
+
+def agent_not_supported(
+    agent, severity=ReportItemSeverity.ERROR, forceable=None
+):
+    """
+    Specified agent is not supported
+
+    agent -- name of agent which is not supported
+    """
+    return ReportItem(
+        report_codes.UNSUPPORTED_AGENT,
+        severity,
+        "Agent '{agent}' is not supported",
+        info={"agent": agent},
+        forceable=forceable
+    )
+
+
+def resource_agent_general_error(agent=None):
+    """
+    General, unspecified error of a resource or fence agent.
+
+    agent -- agent name
+    """
+    msg = "Unspecified problem of resource/fence agent"
+    return ReportItem.error(
+        report_codes.AGENT_GENERAL_ERROR,
+        msg if agent is None else msg + " '{agent}'",
+        info={"agent": agent}
+    )
+
+
+def omitting_node(node):
+    """
+    warning that specified node will be omitted in following actions
+
+    node -- node name
+    """
+    return ReportItem.warning(
+        report_codes.OMITTING_NODE,
+        "Omitting node '{node}'",
+        info={"node": node}
+    )
+
+
+def sbd_check_started():
+    """
+    info that SBD pre-enabling checks started
+    """
+    return ReportItem.info(
+        report_codes.SBD_CHECK_STARTED,
+        "Running SBD pre-enabling checks..."
+    )
+
+
+def sbd_check_success(node):
+    """
+    info that SBD pre-enabling check finished without issues on specified node
+
+    node -- node name
+    """
+    return ReportItem.info(
+        report_codes.SBD_CHECK_SUCCESS,
+        "{node}: SBD pre-enabling checks done",
+        info={"node": node}
+    )
+
+
+def sbd_config_distribution_started():
+    """
+    distribution of SBD configuration started
+    """
+    return ReportItem.info(
+        report_codes.SBD_CONFIG_DISTRIBUTION_STARTED,
+        "Distributing SBD config..."
+    )
+
+
+def sbd_config_accepted_by_node(node):
+    """
+    info that SBD configuration has been saved successfully on specified node
+
+    node -- node name
+    """
+    return ReportItem.info(
+        report_codes.SBD_CONFIG_ACCEPTED_BY_NODE,
+        "{node}: SBD config saved",
+        info={"node": node}
+    )
+
+
+def unable_to_get_sbd_config(node, reason, severity=ReportItemSeverity.ERROR):
+    """
+    unable to get SBD config from specified node (communication or parsing
+    error)
+
+    node -- node name
+    reason -- reason of failure
+    """
+    return ReportItem(
+        report_codes.UNABLE_TO_GET_SBD_CONFIG,
+        severity,
+        "Unable to get SBD configuration from node '{node}': {reason}",
+        info={
+            "node": node,
+            "reason": reason
+        }
+    )
+
+
+def sbd_enabling_started():
+    """
+    enabling SBD service started
+    """
+    return ReportItem.info(
+        report_codes.SBD_ENABLING_STARTED,
+        "Enabling SBD service..."
+    )
+
+
+def sbd_disabling_started():
+    """
+    disabling SBD service started
+    """
+    return ReportItem.info(
+        report_codes.SBD_DISABLING_STARTED,
+        "Disabling SBD service..."
+    )
+
+
+def invalid_response_format(node):
+    """
+    error message that a response in an invalid format was received from the
+    specified node
+
+    node -- node name
+    """
+    return ReportItem.error(
+        report_codes.INVALID_RESPONSE_FORMAT,
+        "{node}: Invalid format of response",
+        info={"node": node}
+    )
+
+
+def sbd_not_installed(node):
+    """
+    sbd is not installed on specified node
+
+    node -- node name
+    """
+    return ReportItem.error(
+        report_codes.SBD_NOT_INSTALLED,
+        "SBD is not installed on node '{node}'",
+        info={"node": node}
+    )
+
+
+def watchdog_not_found(node, watchdog):
+    """
+    watchdog doesn't exist on specified node
+
+    node -- node name
+    watchdog -- watchdog device path
+    """
+    return ReportItem.error(
+        report_codes.WATCHDOG_NOT_FOUND,
+        "Watchdog '{watchdog}' does not exist on node '{node}'",
+        info={
+            "node": node,
+            "watchdog": watchdog
+        }
+    )
+
+
+def unable_to_get_sbd_status(node, reason):
+    """
+    there was a (communication or parsing) failure while obtaining the status
+    of SBD from the specified node
+
+    node -- node name
+    reason -- reason of failure
+    """
+    return ReportItem.warning(
+        report_codes.UNABLE_TO_GET_SBD_STATUS,
+        "Unable to get status of SBD from node '{node}': {reason}",
+        info={
+            "node": node,
+            "reason": reason
+        }
+    )
+
+def cluster_restart_required_to_apply_changes():
+    """
+    warn the user that a cluster restart is needed to apply the new
+    configuration
+    """
+    return ReportItem.warning(
+        report_codes.CLUSTER_RESTART_REQUIRED_TO_APPLY_CHANGES,
+        "Cluster restart is required in order to apply these changes."
+    )
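
The optional-node message pattern used by the service_* builders above can be shown in a tiny self-contained sketch (the helper name is illustrative, not part of this patch):

    def _node_prefixed(msg, node):
        # prefix the message template with the node name only when it is known
        return msg if node is None else "{node}: " + msg

    assert _node_prefixed("{service} enabled", None) == "{service} enabled"
    assert _node_prefixed("{service} enabled", "node1") == \
        "{node}: {service} enabled"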
diff --git a/pcs/lib/resource_agent.py b/pcs/lib/resource_agent.py
index a3c6650..ea93875 100644
--- a/pcs/lib/resource_agent.py
+++ b/pcs/lib/resource_agent.py
@@ -9,31 +9,40 @@ import os
 from lxml import etree
 
 from pcs import settings
-from pcs.common import report_codes
-from pcs.lib.errors import LibraryError
-from pcs.lib.errors import ReportItem
+from pcs.lib import reports
+from pcs.lib.errors import ReportItemSeverity
 from pcs.lib.pacemaker_values import is_true
 from pcs.lib.external import is_path_runnable
+from pcs.common import report_codes
 from pcs.common.tools import simple_cache
 
 
-class UnsupportedResourceAgent(LibraryError):
+class ResourceAgentLibError(Exception):
     pass
 
 
-class InvalidAgentName(LibraryError):
-    pass
+class ResourceAgentCommonError(ResourceAgentLibError):
+    # pylint: disable=super-init-not-called
+    def __init__(self, agent):
+        self.agent = agent
 
 
-class AgentNotFound(LibraryError):
+class UnsupportedResourceAgent(ResourceAgentCommonError):
     pass
 
 
-class UnableToGetAgentMetadata(LibraryError):
+class AgentNotFound(ResourceAgentCommonError):
     pass
 
 
-class InvalidMetadataFormat(LibraryError):
+class UnableToGetAgentMetadata(ResourceAgentCommonError):
+    # pylint: disable=super-init-not-called
+    def __init__(self, agent, message):
+        self.agent = agent
+        self.message = message
+
+
+class InvalidMetadataFormat(ResourceAgentLibError):
     pass
 
 
@@ -48,14 +57,6 @@ def __get_text_from_dom_element(element):
         return element.text.strip()
 
 
-def __get_invalid_metadata_format_exception():
-    return InvalidMetadataFormat(ReportItem.error(
-        report_codes.INVALID_METADATA_FORMAT,
-        "invalid agent metadata format",
-        forceable=True
-    ))
-
-
 def _get_parameter(parameter_dom):
     """
     Returns dictionary that describes parameter.
@@ -68,11 +69,12 @@ def _get_parameter(parameter_dom):
         default: default value,
         required: True if is required parameter, False otherwise
     }
+    Raises InvalidMetadataFormat if parameter_dom is not in valid format
 
     parameter_dom -- parameter dom element
     """
     if parameter_dom.tag != "parameter" or parameter_dom.get("name") is None:
-        raise __get_invalid_metadata_format_exception()
+        raise InvalidMetadataFormat()
 
     longdesc = __get_text_from_dom_element(parameter_dom.find("longdesc"))
     shortdesc = __get_text_from_dom_element(parameter_dom.find("shortdesc"))
@@ -95,12 +97,13 @@ def _get_parameter(parameter_dom):
 
 def _get_agent_parameters(metadata_dom):
     """
-    Returns list of parameters from agents metadata
+    Returns list of parameters from agent's metadata.
+    Raises InvalidMetadataFormat if metadata_dom is not in valid format.
 
     metadata_dom -- agent's metadata dom
     """
     if metadata_dom.tag != "resource-agent":
-        raise __get_invalid_metadata_format_exception()
+        raise InvalidMetadataFormat()
 
     params_el = metadata_dom.find("parameters")
     if params_el is None:
@@ -112,19 +115,21 @@ def _get_agent_parameters(metadata_dom):
 
 
 def _get_pcmk_advanced_stonith_parameters(runner):
-    """Returns advanced instance attributes for stonith devices"""
+    """
+    Returns advanced instance attributes for stonith devices
+    Raises UnableToGetAgentMetadata if there is a problem obtaining
+        metadata of stonithd.
+    Raises InvalidMetadataFormat if the obtained metadata is not in a valid
+        format.
+
+    runner -- CommandRunner
+    """
     @simple_cache
     def __get_stonithd_parameters():
         output, retval = runner.run(
             [settings.stonithd_binary, "metadata"], ignore_stderr=True
         )
         if output.strip() == "":
-            raise UnableToGetAgentMetadata(ReportItem.error(
-                report_codes.UNABLE_TO_GET_AGENT_METADATA,
-                "unable to get metadata of stonithd",
-                info={"external_exitcode": retval, "external_output": output},
-                forceable=True
-            ))
+            raise UnableToGetAgentMetadata("stonithd", output)
 
         try:
             params = _get_agent_parameters(etree.fromstring(output))
@@ -136,7 +141,7 @@ def _get_pcmk_advanced_stonith_parameters(runner):
                 param["advanced"] = is_advanced
             return params
         except etree.XMLSyntaxError:
-            raise __get_invalid_metadata_format_exception()
+            raise InvalidMetadataFormat()
 
     return __get_stonithd_parameters()
 
@@ -144,18 +149,14 @@ def _get_pcmk_advanced_stonith_parameters(runner):
 def get_fence_agent_metadata(runner, fence_agent):
     """
     Returns dom of metadata for specified fence agent
+    Raises AgentNotFound if fence_agent doesn't start with fence_, is a
+        relative path, or the file is not runnable.
+    Raises UnableToGetAgentMetadata if there was a problem getting or
+        parsing the metadata.
 
+    runner -- CommandRunner
     fence_agent -- fence agent name, should start with 'fence_'
     """
-
-    def __get_error(info):
-        return UnableToGetAgentMetadata(ReportItem.error(
-            report_codes.UNABLE_TO_GET_AGENT_METADATA,
-            "unable to get metadata of fence agent '{agent_name}'",
-            info=info,
-            forceable=True
-        ))
-
     script_path = os.path.join(settings.fence_agent_binaries, fence_agent)
 
     if not (
@@ -163,36 +164,27 @@ def get_fence_agent_metadata(runner, fence_agent):
         __is_path_abs(script_path) and
         is_path_runnable(script_path)
     ):
-        raise AgentNotFound(ReportItem.error(
-            report_codes.INVALID_RESOURCE_NAME,
-            "fence agent '{agent_name}' not found",
-            info={"agent_name": fence_agent},
-            forceable=True
-        ))
+        raise AgentNotFound(fence_agent)
 
     output, retval = runner.run(
         [script_path, "-o", "metadata"], ignore_stderr=True
     )
 
     if output.strip() == "":
-        raise __get_error({
-            "agent_name": fence_agent,
-            "external_exitcode": retval,
-            "external_output": output
-        })
+        raise UnableToGetAgentMetadata(fence_agent, output)
 
     try:
         return etree.fromstring(output)
     except etree.XMLSyntaxError as e:
-        raise __get_error({
-            "agent_name": fence_agent,
-            "error_info": str(e)
-        })
+        raise UnableToGetAgentMetadata(fence_agent, str(e))
 
 
 def _get_nagios_resource_agent_metadata(agent):
     """
-    Returns metadata dom for specified nagios resource agent
+    Returns metadata dom for specified nagios resource agent.
+    Raises AgentNotFound if agent is a relative path.
+    Raises UnableToGetAgentMetadata if there was a problem getting or
+        parsing the metadata.
 
     agent -- name of nagios resource agent
     """
@@ -200,54 +192,32 @@ def _get_nagios_resource_agent_metadata(agent):
     metadata_path = os.path.join(settings.nagios_metadata_path, agent + ".xml")
 
     if not __is_path_abs(metadata_path):
-        raise AgentNotFound(ReportItem.error(
-            report_codes.INVALID_RESOURCE_NAME,
-            "resource agent '{agent_name}' not found",
-            info={"agent_name": agent_name},
-            forceable=True
-        ))
+        raise AgentNotFound(agent_name)
 
     try:
         return etree.parse(metadata_path).getroot()
     except Exception as e:
-        raise UnableToGetAgentMetadata(ReportItem.error(
-            report_codes.UNABLE_TO_GET_AGENT_METADATA,
-            "unable to get metadata of resource agent '{agent_name}': " +
-            "{error_info}",
-            info={
-                "agent_name": agent_name,
-                "error_info": str(e)
-            },
-            forceable=True
-        ))
+        raise UnableToGetAgentMetadata(agent_name, str(e))
 
 
 def _get_ocf_resource_agent_metadata(runner, provider, agent):
     """
     Returns metadata dom for specified ocf resource agent
+    Raises AgentNotFound if the specified agent is a relative path or the
+        file is not runnable.
+    Raises UnableToGetAgentMetadata if there was a problem getting or
+        parsing the metadata.
 
+    runner -- CommandRunner
     provider -- resource agent provider
     agent -- resource agent name
     """
     agent_name = "ocf:" + provider + ":" + agent
 
-    def __get_error(info):
-        return UnableToGetAgentMetadata(ReportItem.error(
-            report_codes.UNABLE_TO_GET_AGENT_METADATA,
-            "unable to get metadata of resource agent '{agent_name}'",
-            info=info,
-            forceable=True
-        ))
-
     script_path = os.path.join(settings.ocf_resources, provider, agent)
 
     if not __is_path_abs(script_path) or not is_path_runnable(script_path):
-        raise AgentNotFound(ReportItem.error(
-            report_codes.INVALID_RESOURCE_NAME,
-            "resource agent '{agent_name}' not found",
-            info={"agent_name": agent_name},
-            forceable=True
-        ))
+        raise AgentNotFound(agent_name)
 
     output, retval = runner.run(
         [script_path, "meta-data"],
@@ -256,19 +226,12 @@ def _get_ocf_resource_agent_metadata(runner, provider, agent):
     )
 
     if output.strip() == "":
-        raise __get_error({
-            "agent_name": agent_name,
-            "external_exitcode": retval,
-            "external_output": output
-        })
+        raise UnableToGetAgentMetadata(agent_name, output)
 
     try:
         return etree.fromstring(output)
     except etree.XMLSyntaxError as e:
-        raise __get_error({
-            "agent_name": agent_name,
-            "error_info": str(e)
-        })
+        raise UnableToGetAgentMetadata(agent_name, str(e))
 
 
 def get_agent_desc(metadata_dom):
@@ -279,11 +242,12 @@ def get_agent_desc(metadata_dom):
         longdesc: long description
         shortdesc: short description
     }
+    Raises InvalidMetadataFormat if metadata_dom is not in valid format.
 
     metadata_dom -- metadata dom of agent
     """
     if metadata_dom.tag != "resource-agent":
-        raise __get_invalid_metadata_format_exception()
+        raise InvalidMetadataFormat()
 
     shortdesc_el = metadata_dom.find("shortdesc")
     if shortdesc_el is None:
@@ -304,7 +268,17 @@ def _filter_fence_agent_parameters(parameters):
 
     parameters -- list of fence agent parameters
     """
-    banned_parameters = ["debug", "action", "verbose", "version", "help"]
+    # we don't allow the user to change these options; they are intended
+    # to be used interactively (on the command line), so there is no point
+    # in setting them
+    banned_parameters = ["debug", "verbose", "version", "help"]
+    # still, we have to let the user change 'action' for backward
+    # compatibility, so we just mark it as not required
+    for param in parameters:
+        if param["name"] == "action":
+            param["shortdesc"] = param.get("shortdesc", "") + "\nWARNING: " +\
+                "specifying 'action' is deprecated and not necessary with " +\
+                "current Pacemaker versions"
+            param["required"] = False
     return [
         param for param in parameters if param["name"] not in banned_parameters
     ]
@@ -314,6 +288,7 @@ def get_fence_agent_parameters(runner, metadata_dom):
     """
     Returns complete list of parameters for fence agent from its metadata.
 
+    runner -- CommandRunner
     metadata_dom -- metadata dom of fence agent
     """
     return (
@@ -335,15 +310,13 @@ def get_resource_agent_parameters(metadata_dom):
 def get_resource_agent_metadata(runner, agent):
     """
     Returns metadata of specified agent as dom
+    Raises UnsupportedResourceAgent if the specified agent is not an ocf or
+        nagios agent.
 
+    runner -- CommandRunner
     agent -- agent name
     """
-    error = UnsupportedResourceAgent(ReportItem.error(
-        report_codes.UNSUPPORTED_RESOURCE_AGENT,
-        "resource agent '{agent}' is not supported",
-        info={"agent": agent},
-        forceable=True
-    ))
+    error = UnsupportedResourceAgent(agent)
     if agent.startswith("ocf:"):
         agent_info = agent.split(":", 2)
         if len(agent_info) != 3:
@@ -359,11 +332,12 @@ def _get_action(action_el):
     """
     Returns XML action element as dictionary, where all elements attributes
     are key of dict
+    Raises InvalidMetadataFormat if action_el is not in valid format.
 
     action_el -- action lxml.etree element
     """
     if action_el.tag != "action" or action_el.get("name") is None:
-        raise __get_invalid_metadata_format_exception()
+        raise InvalidMetadataFormat()
 
     return dict(action_el.items())
 
@@ -371,11 +345,12 @@ def _get_action(action_el):
 def get_agent_actions(metadata_dom):
     """
     Returns list of actions from agent's metadata
+    Raises InvalidMetadataFormat if metadata_dom is not in valid format.
 
     metadata_dom -- agent's metadata dom
     """
     if metadata_dom.tag != "resource-agent":
-        raise __get_invalid_metadata_format_exception()
+        raise InvalidMetadataFormat()
 
     actions_el = metadata_dom.find("actions")
     if actions_el is None:
@@ -402,6 +377,7 @@ def validate_instance_attributes(runner, instance_attrs, agent):
     Validates instance attributes according to specified agent.
     Returns tuple of lists (<invalid attributes>, <missing required attributes>)
 
+    runner -- CommandRunner
     instance_attrs -- dictionary of instance attributes, where key is
         attribute name and value is attribute value
     agent -- full name (<class>:<agent> or <class>:<provider>:<agent>)
@@ -425,3 +401,35 @@ def validate_instance_attributes(runner, instance_attrs, agent):
             get_resource_agent_metadata(runner, agent)
         )
         return _validate_instance_attributes(agent_params, instance_attrs)
+
+
+def resource_agent_lib_error_to_report_item(
+    e, severity=ReportItemSeverity.ERROR, forceable=False
+):
+    """
+    Transform ResourceAgentLibError to ReportItem
+    """
+    force = None
+    if e.__class__ == AgentNotFound:
+        if severity == ReportItemSeverity.ERROR and forceable:
+            force = report_codes.FORCE_UNKNOWN_AGENT
+        return reports.agent_not_found(e.agent, severity, force)
+    if e.__class__ == UnsupportedResourceAgent:
+        if severity == ReportItemSeverity.ERROR and forceable:
+            force = report_codes.FORCE_UNSUPPORTED_AGENT
+        return reports.agent_not_supported(e.agent, severity, force)
+    if e.__class__ == UnableToGetAgentMetadata:
+        if severity == ReportItemSeverity.ERROR and forceable:
+            force = report_codes.FORCE_METADATA_ISSUE
+        return reports.unable_to_get_agent_metadata(
+            e.agent, e.message, severity, force
+        )
+    if e.__class__ == InvalidMetadataFormat:
+        if severity == ReportItemSeverity.ERROR and forceable:
+            force = report_codes.FORCE_METADATA_ISSUE
+        return reports.invalid_metadata_format(severity, force)
+    if e.__class__ == ResourceAgentCommonError:
+        return reports.resource_agent_general_error(e.agent)
+    if e.__class__ == ResourceAgentLibError:
+        return reports.resource_agent_general_error()
+    raise e
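
A hypothetical call site showing how the new exception hierarchy is expected to be translated back into ReportItems at the library boundary (describe_agent is an illustrative name, not part of this patch):

    from pcs.lib import resource_agent
    from pcs.lib.errors import LibraryError

    def describe_agent(runner, agent):
        # turn any agent-library error into a LibraryError with a ReportItem
        try:
            return resource_agent.get_resource_agent_metadata(runner, agent)
        except resource_agent.ResourceAgentLibError as e:
            raise LibraryError(
                resource_agent.resource_agent_lib_error_to_report_item(e)
            )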
diff --git a/pcs/lib/sbd.py b/pcs/lib/sbd.py
new file mode 100644
index 0000000..1330bfc
--- /dev/null
+++ b/pcs/lib/sbd.py
@@ -0,0 +1,364 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import json
+
+from pcs import settings
+from pcs.common import tools
+from pcs.lib import (
+    external,
+    reports,
+)
+from pcs.lib.tools import dict_to_environment_file
+from pcs.lib.external import (
+    NodeCommunicator,
+    node_communicator_exception_to_report_item,
+    NodeCommunicationException,
+)
+from pcs.lib.errors import LibraryError
+
+
+def _run_parallel_and_raise_lib_error_on_failure(func, param_list):
+    """
+    Run function func in parallel for all specified parameters in arg_list.
+    Raise LibraryError on any failure.
+
+    func -- function to be run
+    param_list -- list of tuples: (*args, **kwargs)
+    """
+    report_list = []
+
+    def _parallel(*args, **kwargs):
+        try:
+            func(*args, **kwargs)
+        except NodeCommunicationException as e:
+            report_list.append(node_communicator_exception_to_report_item(e))
+        except LibraryError as e:
+            report_list.extend(e.args)
+
+    tools.run_parallel(_parallel, param_list)
+
+    if report_list:
+        raise LibraryError(*report_list)
+
+
+def check_sbd(communicator, node, watchdog):
+    """
+    Check SBD on specified 'node' and existence of specified watchdog.
+
+    communicator -- NodeCommunicator
+    node -- NodeAddresses
+    watchdog -- watchdog path
+    """
+    return communicator.call_node(
+        node,
+        "remote/check_sbd",
+        NodeCommunicator.format_data_dict({"watchdog": watchdog})
+    )
+
+
+def check_sbd_on_node(report_processor, node_communicator, node, watchdog):
+    """
+    Check if SBD can be enabled on specified 'node'.
+    Raises LibraryError if check fails.
+    Raises NodeCommunicationException if there is communication issue.
+
+    report_processor --
+    node_communicator -- NodeCommunicator
+    node -- NodeAddresses
+    watchdog -- watchdog path
+    """
+    report_list = []
+    try:
+        data = json.loads(check_sbd(node_communicator, node, watchdog))
+        if not data["sbd"]["installed"]:
+            report_list.append(reports.sbd_not_installed(node.label))
+        if not data["watchdog"]["exist"]:
+            report_list.append(reports.watchdog_not_found(node.label, watchdog))
+    except (ValueError, KeyError):
+        raise LibraryError(reports.invalid_response_format(node.label))
+
+    if report_list:
+        raise LibraryError(*report_list)
+    report_processor.process(reports.sbd_check_success(node.label))
+
+
+def check_sbd_on_all_nodes(report_processor, node_communicator, nodes_watchdog):
+    """
+    Checks SBD (if SBD is installed and watchdog exists) on all NodeAddresses
+        defined as keys in nodes_watchdog.
+    Raises LibraryError with all ReportItems in case of any failure.
+
+    report_processor --
+    node_communicator -- NodeCommunicator
+    nodes_watchdog -- dictionary with NodeAddresses as keys and watchdog path
+        as value
+    """
+    report_processor.process(reports.sbd_check_started())
+    _run_parallel_and_raise_lib_error_on_failure(
+        check_sbd_on_node,
+        [
+            ([report_processor, node_communicator, node, watchdog], {})
+            for node, watchdog in sorted(nodes_watchdog.items())
+        ]
+    )
+
+
+def set_sbd_config(communicator, node, config):
+    """
+    Send SBD configuration to 'node'.
+
+    communicator -- NodeCommunicator
+    node -- NodeAddresses
+    config -- string, SBD configuration file
+    """
+    communicator.call_node(
+        node,
+        "remote/set_sbd_config",
+        NodeCommunicator.format_data_dict({"config": config})
+    )
+
+
+def set_sbd_config_on_node(report_processor, node_communicator, node, config):
+    """
+    Send SBD configuration to 'node'. Also puts correct node name into
+        SBD_OPTS option (SBD_OPTS="-n <node_name>").
+
+    report_processor --
+    node_communicator -- NodeCommunicator
+    node -- NodeAddresses
+    config -- dictionary in format: <SBD config option>: <value>
+    """
+    config = dict(config)
+    config["SBD_OPTS"] = '"-n {node_name}"'.format(node_name=node.label)
+    set_sbd_config(node_communicator, node, dict_to_environment_file(config))
+    report_processor.process(
+        reports.sbd_config_accepted_by_node(node.label)
+    )
+
+
+def set_sbd_config_on_all_nodes(
+        report_processor, node_communicator, node_list, config
+):
+    """
+    Send SBD configuration 'config' to all nodes in 'node_list'. Option
+        SBD_OPTS="-n <node_name>" is added automatically.
+    Raises LibraryError with all ReportItems in case of any failure.
+
+    report_processor --
+    node_communicator -- NodeCommunicator
+    node_list -- NodeAddressesList
+    config -- dictionary in format: <SBD config option>: <value>
+    """
+    report_processor.process(reports.sbd_config_distribution_started())
+    _run_parallel_and_raise_lib_error_on_failure(
+        set_sbd_config_on_node,
+        [
+            ([report_processor, node_communicator, node, config], {})
+            for node in node_list
+        ]
+    )
+
+
+def enable_sbd_service(communicator, node):
+    """
+    Enable SBD service on 'node'.
+
+    communicator -- NodeCommunicator
+    node -- NodeAddresses
+    """
+    communicator.call_node(node, "remote/sbd_enable", "")
+
+
+def enable_sbd_service_on_node(report_processor, node_communicator, node):
+    """
+    Enable SBD service on 'node'.
+    Raises NodeCommunicationException if there is a communication issue.
+
+    report_processor --
+    node_communicator -- NodeCommunicator
+    node -- NodeAddresses
+    """
+    enable_sbd_service(node_communicator, node)
+    report_processor.process(reports.service_enable_success("sbd", node.label))
+
+
+def enable_sbd_service_on_all_nodes(
+        report_processor, node_communicator, node_list
+):
+    """
+    Enable SBD service on all nodes in 'node_list'.
+    Raises LibraryError with all ReportItems in case of any failure.
+
+    report_processor --
+    node_communicator -- NodeCommunicator
+    node_list -- NodeAddressesList
+    """
+    report_processor.process(reports.sbd_enabling_started())
+    _run_parallel_and_raise_lib_error_on_failure(
+        enable_sbd_service_on_node,
+        [
+            ([report_processor, node_communicator, node], {})
+            for node in node_list
+        ]
+    )
+
+
+def disable_sbd_service(communicator, node):
+    """
+    Disable SBD service on 'node'.
+
+    communicator -- NodeCommunicator
+    node -- NodeAddresses
+    """
+    communicator.call_node(node, "remote/sbd_disable", "")
+
+
+def disable_sbd_service_on_node(report_processor, node_communicator, node):
+    """
+    Disable SBD service on 'node'.
+
+    report_processor --
+    node_communicator -- NodeCommunicator
+    node -- NodeAddresses
+    """
+    disable_sbd_service(node_communicator, node)
+    report_processor.process(reports.service_disable_success("sbd", node.label))
+
+
+def disable_sbd_service_on_all_nodes(
+    report_processor, node_communicator, node_list
+):
+    """
+    Disable SBD service on all nodes in 'node_list'.
+    Raises LibraryError with all ReportItems in case of any failure.
+
+    report_processor --
+    node_communicator -- NodeCommunicator
+    node_list -- NodeAddressesList
+    """
+    report_processor.process(reports.sbd_disabling_started())
+    _run_parallel_and_raise_lib_error_on_failure(
+        disable_sbd_service_on_node,
+        [
+            ([report_processor, node_communicator, node], {})
+            for node in node_list
+        ]
+    )
+
+
+def set_stonith_watchdog_timeout_to_zero(communicator, node):
+    """
+    Set cluster property 'stonith-watchdog-timeout' to value '0' on 'node'.
+
+    communicator -- NodeCommunicator
+    node -- NodeAddresses
+    """
+    communicator.call_node(
+        node, "remote/set_stonith_watchdog_timeout_to_zero", ""
+    )
+
+
+def set_stonith_watchdog_timeout_to_zero_on_all_nodes(
+    node_communicator, node_list
+):
+    """
+    Sets cluster property 'stonith-watchdog-timeout' to value '0' on all
+        nodes in 'node_list', even if the cluster is not currently running on
+        them (by directly editing the CIB file).
+    Raises LibraryError with all ReportItems in case of any failure.
+
+    node_communicator -- NodeCommunicator
+    node_list -- NodeAddressesList
+    """
+    report_list = []
+    for node in node_list:
+        try:
+            set_stonith_watchdog_timeout_to_zero(node_communicator, node)
+        except NodeCommunicationException as e:
+            report_list.append(node_communicator_exception_to_report_item(e))
+    if report_list:
+        raise LibraryError(*report_list)
+
+
+def remove_stonith_watchdog_timeout(communicator, node):
+    """
+    Remove cluster property 'stonith-watchdog-timeout' on 'node'.
+
+    communicator -- NodeCommunicator
+    node -- NodeAddresses
+    """
+    communicator.call_node(node, "remote/remove_stonith_watchdog_timeout", "")
+
+
+def remove_stonith_watchdog_timeout_on_all_nodes(node_communicator, node_list):
+    """
+    Removes cluster property 'stonith-watchdog-timeout' from all nodes
+        in 'node_list', even if the cluster is not currently running on them
+        (by directly editing the CIB file).
+    Raises LibraryError with all ReportItems in case of any failure.
+
+    node_communicator -- NodeCommunicator
+    node_list -- NodeAddressesList
+    """
+    report_list = []
+    for node in node_list:
+        try:
+            remove_stonith_watchdog_timeout(node_communicator, node)
+        except NodeCommunicationException as e:
+            report_list.append(node_communicator_exception_to_report_item(e))
+    if report_list:
+        raise LibraryError(*report_list)
+
+
+def get_default_sbd_config():
+    """
+    Returns default SBD configuration as dictionary.
+    """
+    return {
+        "SBD_DELAY_START": "no",
+        "SBD_PACEMAKER": "yes",
+        "SBD_STARTMODE": "clean",
+        "SBD_WATCHDOG_DEV": settings.sbd_watchdog_default,
+        "SBD_WATCHDOG_TIMEOUT": "5"
+    }
+
+
+def get_local_sbd_config():
+    """
+    Get local SBD configuration.
+    Returns SBD configuration file as string.
+    Raises LibraryError on any failure.
+    """
+    try:
+        with open(settings.sbd_config, "r") as sbd_cfg:
+            return sbd_cfg.read()
+    except EnvironmentError as e:
+        raise LibraryError(reports.unable_to_get_sbd_config(
+            "local node", str(e)
+        ))
+
+
+def get_sbd_config(communicator, node):
+    """
+    Get SBD configuration from 'node'.
+    Returns SBD configuration string.
+
+    communicator -- NodeCommunicator
+    node -- NodeAddresses
+    """
+    return communicator.call_node(node, "remote/get_sbd_config", "")
+
+
+def is_sbd_enabled(runner):
+    """
+    Check if SBD service is enabled in local system.
+    Return True if SBD service is enabled, False otherwise.
+
+    runner -- CommandRunner
+    """
+    return external.is_service_enabled(runner, "sbd")
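
Pieced together, the per-node helpers above compose into an enable flow roughly as follows. This is a sketch only: report_processor, node_communicator and node_list are assumed to come from the library environment, and the watchdog path is an example value:

    from pcs.lib import sbd

    config = sbd.get_default_sbd_config()
    config["SBD_WATCHDOG_TIMEOUT"] = "10"
    # check every node first, then distribute the config and enable the service
    sbd.check_sbd_on_all_nodes(
        report_processor, node_communicator,
        dict((node, "/dev/watchdog") for node in node_list),
    )
    sbd.set_sbd_config_on_all_nodes(
        report_processor, node_communicator, node_list, config
    )
    sbd.enable_sbd_service_on_all_nodes(
        report_processor, node_communicator, node_list
    )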
diff --git a/pcs/lib/tools.py b/pcs/lib/tools.py
new file mode 100644
index 0000000..324047b
--- /dev/null
+++ b/pcs/lib/tools.py
@@ -0,0 +1,49 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+
+def environment_file_to_dict(config):
+    """
+    Parse a systemd Environment file. This parser is a simplified version
+    of the one in systemd; the full systemd semantics are not needed here.
+    Returns configuration in dictionary in format:
+    {
+        <option>: <value>,
+        ...
+    }
+
+    config -- Environment file as string
+    """
+    # escape new lines
+    config = config.replace("\\\n", "")
+
+    data = {}
+    for line in [l.strip() for l in config.split("\n")]:
+        if line == "" or line.startswith("#") or line.startswith(";"):
+            continue
+        if "=" not in line:
+            continue
+        key, val = line.split("=", 1)
+        value = val.strip()
+        data[key.strip()] = value
+    return data
+
+
+def dict_to_environment_file(config_dict):
+    """
+    Convert data in dictionary to Environment file format.
+    Returns Environment file as string in format:
+    # comment
+    <option>=<value>
+    ...
+
+    config_dict -- dictionary in format: { <option>: <value>, ...}
+    """
+    lines = ["# This file has been generated by pcs.\n"]
+    for key, val in sorted(config_dict.items()):
+        lines.append("{key}={val}\n".format(key=key, val=val))
+    return "".join(lines)
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index ac0717f..38a4913 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "May 2016" "pcs 0.9.151" "System Administration Utilities"
+.TH PCS "8" "June 2016" "pcs 0.9.152" "System Administration Utilities"
 .SH NAME
 pcs \- pacemaker/corosync configuration system
 .SH SYNOPSIS
@@ -9,50 +9,53 @@ Control and configure pacemaker and corosync.
 .SH OPTIONS
 .TP
 \fB\-h\fR, \fB\-\-help\fR
-Display usage and exit
+Display usage and exit.
 .TP
 \fB\-f\fR file
-Perform actions on file instead of active CIB
+Perform actions on file instead of active CIB.
 .TP
 \fB\-\-debug\fR
-Print all network traffic and external commands run
+Print all network traffic and external commands run.
 .TP
 \fB\-\-version\fR
-Print pcs version information
+Print pcs version information.
 .SS "Commands:"
 .TP
 cluster
-Configure cluster options and nodes
+Configure cluster options and nodes.
 .TP
 resource
-Manage cluster resources
+Manage cluster resources.
 .TP
 stonith
-Configure fence devices
+Configure fence devices.
 .TP
 constraint
-Set resource constraints
+Set resource constraints.
 .TP
 property
-Set pacemaker properties
+Set pacemaker properties.
 .TP
 acl
-Set pacemaker access control lists
+Set pacemaker access control lists.
+.TP
+qdevice
+Manage quorum device provider on the local host.
 .TP
 quorum
-Manage cluster quorum settings
+Manage cluster quorum settings.
 .TP
 status
-View cluster status
+View cluster status.
 .TP
 config
-View and manage cluster configuration
+View and manage cluster configuration.
 .TP
 pcsd
-Manage pcs daemon
+Manage pcs daemon.
 .TP
 node
-Manage cluster nodes
+Manage cluster nodes.
 .SS "resource"
 .TP
 [show [resource id]] [\fB\-\-full\fR] [\fB\-\-groups\fR]
@@ -62,7 +65,7 @@ list [<standard|provider|type>] [\fB\-\-nodesc\fR]
 Show list of all available resources, optionally filtered by specified type, standard or provider. If \fB\-\-nodesc\fR is used then descriptions of resources are not printed.
 .TP
 describe <standard:provider:type|type>
-Show options for the specified resource
+Show options for the specified resource.
 .TP
 create <resource id> <standard:provider:type|type> [resource options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...] [\fB\-\-clone\fR <clone options> | \fB\-\-master\fR <master options> | \fB\-\-group\fR <group id> [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>]] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
 Create specified resource.  If \fB\-\-clone\fR is used a clone resource is created.  If \fB\-\-master\fR is specified a master/slave resource is created.  If \fB\-\-group\fR is specified the resource is added to the group named.  You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group.  If \fB\-\-disabled\fR is specified the resource is not started automatically.  If \fB\-\-wait\fR is specifie [...]
@@ -106,28 +109,28 @@ clear <resource id> [node] [\fB\-\-master\fR] [\fB\-\-wait\fR[=n]]
 Remove constraints created by move and/or ban on the specified resource (and node if specified). If \fB\-\-master\fR is used the scope of the command is limited to the master role and you must use the master id (instead of the resource id).  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting and/or moving resources if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 standards
-List available resource agent standards supported by this installation. (OCF, LSB, etc.)
+List available resource agent standards supported by this installation (OCF, LSB, etc.).
 .TP
 providers
-List available OCF resource agent providers
+List available OCF resource agent providers.
 .TP
 agents [standard[:provider]]
-List available agents optionally filtered by standard and provider
+List available agents optionally filtered by standard and provider.
 .TP
 update <resource id> [resource options] [op [<operation action> <operation options>]...] [meta <meta operations>...] [\fB\-\-wait\fR[=n]]
 Add/Change options to specified resource, clone or multi\-state resource.  If an operation (op) is specified it will update the first found operation with the same action on the specified resource, if no operation with that action exists then a new operation will be created.  (WARNING: all existing options on the updated operation will be reset if not specified.)  If you want to create multiple monitor operations you should use the 'op add' & 'op remove' commands.  If \fB\-\-wait\fR is s [...]
 .TP
 op add <resource id> <operation action> [operation properties]
-Add operation for specified resource
+Add operation for specified resource.
 .TP
 op remove <resource id> <operation action> [<operation properties>...]
 Remove specified operation (note: you must specify the exact operation properties to properly remove an existing operation).
 .TP
 op remove <operation id>
-Remove the specified operation id
+Remove the specified operation id.
 .TP
 op defaults [options]
-Set default values for operations, if no options are passed, lists currently configured defaults
+Set default values for operations, if no options are passed, lists currently configured defaults.
 .TP
 meta <resource id | group id | master id | clone id> <meta options> [\fB\-\-wait\fR[=n]]
 Add specified options to the specified resource, group, master/slave or clone.  Meta options should be in the format of name=value, options may be removed by setting an option without a value.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the changes to take effect and then return 0 if the changes have been processed or 1 otherwise.  If 'n' is not specified it defaults to 60 minutes.  Example: pcs resource meta TestResource failure\-timeout=50 stickiness=
@@ -139,7 +142,7 @@ group remove <group id> <resource id> [resource id] ... [resource id] [\fB\-\-wa
 Remove the specified resource(s) from the group, removing the group if no resources remain.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 ungroup <group id> [resource id] ... [resource id] [\fB\-\-wait\fR[=n]]
-Remove the group (Note: this does not remove any resources from the cluster) or if resources are specified, remove the specified resources from the group.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and the return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
+Remove the group (note: this does not remove any resources from the cluster) or if resources are specified, remove the specified resources from the group.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 clone <resource id | group id> [clone options]... [\fB\-\-wait\fR[=n]]
 Set up the specified resource or group as a clone.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting clone instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
@@ -151,19 +154,19 @@ master [<master/slave id>] <resource id | group id> [options] [\fB\-\-wait\fR[=n
 Configure a resource or group as a multi\-state (master/slave) resource.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting and promoting resource instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.  Note: to remove a master you must remove the resource/group it contains.
 .TP
 manage <resource id> ... [resource n]
-Set resources listed to managed mode (default)
+Set resources listed to managed mode (default).
 .TP
 unmanage <resource id> ... [resource n]
-Set resources listed to unmanaged mode
+Set resources listed to unmanaged mode.
 .TP
 defaults [options]
-Set default values for resources, if no options are passed, lists currently configured defaults
+Set default values for resources, if no options are passed, lists currently configured defaults.
 .TP
 cleanup [<resource id>] [\fB\-\-node\fR <node>]
 Cleans up the resource in the lrmd (useful to reset the resource status and failcount).  This tells the cluster to forget the operation history of a resource and re-detect its current state.  This can be useful to purge knowledge of past failures that have since been resolved.  If a resource id is not specified then all resources/stonith devices will be cleaned up.  If a node is not specified then resources on all nodes will be cleaned up.
 .TP
 failcount show <resource id> [node]
-Show current failcount for specified resource from all nodes or only on specified node
+Show current failcount for specified resource from all nodes or only on specified node.
 .TP
 failcount reset <resource id> [node]
 Reset failcount for specified resource on all nodes or only on specified node. This tells the cluster to forget how many times a resource has failed in the past.  This may allow the resource to be started or moved to a more preferred location.
@@ -188,13 +191,13 @@ auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-force\fR] [\
 Authenticate pcs to pcsd on nodes specified, or on all nodes configured in corosync.conf if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root).  By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other).  Using \fB\-\-force\fR forces re-authentication to occur.
 .TP
 setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1-altaddr]> [<node2[,node2-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\ [...]
-Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-wait\fR will wait up to 'n' seconds for the nodes to start, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for CMAN clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the syste [...]
+Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-wait\fR will wait up to 'n' seconds for the nodes to start, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for CMAN clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the syste [...]
 
-\fB\-\-ipv6\fR will configure corosync to use ipv6 (instead of ipv4)
+\fB\-\-ipv6\fR will configure corosync to use ipv6 (instead of ipv4).  This option is not supported on CMAN clusters.
 
 \fB\-\-token\fR <timeout> sets time in milliseconds until a token loss is declared after not receiving a token (default 1000 ms)
 
-\fB\-\-token_coefficient\fR <timeout> sets time in milliseconds used for clusters with at least 3 nodes as a coefficient for real token timeout calculation (token + (number_of_nodes - 2) * token_coefficient) (default 650 ms)
+\fB\-\-token_coefficient\fR <timeout> sets time in milliseconds used for clusters with at least 3 nodes as a coefficient for real token timeout calculation (token + (number_of_nodes - 2) * token_coefficient) (default 650 ms).  This option is not supported on CMAN clusters.
 
 \fB\-\-join\fR <timeout> sets time in milliseconds to wait for join messages (default 50 ms)
 
@@ -227,40 +230,37 @@ stop [\fB\-\-all\fR] [node] [...]
 Stop corosync & pacemaker on specified node(s), if a node is not specified then corosync & pacemaker are stopped on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are stopped on all nodes.
 .TP
 kill
-Force corosync and pacemaker daemons to stop on the local node (performs kill \-9).
+Force corosync and pacemaker daemons to stop on the local node (performs kill \-9). Note that the init system (e.g. systemd) can detect that the cluster is not running and start it again. If you want to stop the cluster on a node, run pcs cluster stop on that node.
 .TP
 enable [\fB\-\-all\fR] [node] [...]
 Configure corosync & pacemaker to run on node boot on specified node(s), if node is not specified then corosync & pacemaker are enabled on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are enabled on all nodes.
 .TP
 disable [\fB\-\-all\fR] [node] [...]
-Configure corosync & pacemaker to not run on node boot on specified node(s), if node is not specified then corosync & pacemaker are disabled on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are disabled on all nodes. (Note: this is the default after installation)
+Configure corosync & pacemaker to not run on node boot on specified node(s), if node is not specified then corosync & pacemaker are disabled on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are disabled on all nodes. Note: this is the default after installation.
 .TP
 remote-node add <hostname> <resource id> [options]
-Enables the specified resource as a remote-node resource on the specified hostname (hostname should be the same as 'uname -n')
+Enables the specified resource as a remote-node resource on the specified hostname (hostname should be the same as 'uname -n').
 .TP
 remote\-node remove <hostname>
-Disables any resources configured to be remote\-node resource on the specified hostname (hostname should be the same as 'uname -n')
+Disables any resources configured to be remote\-node resource on the specified hostname (hostname should be the same as 'uname -n').
 .TP
 status
-View current cluster status (an alias of 'pcs status cluster')
+View current cluster status (an alias of 'pcs status cluster').
 .TP
 pcsd\-status [node] [...]
-Get current status of pcsd on nodes specified, or on all nodes configured in corosync.conf if no nodes are specified
+Get current status of pcsd on nodes specified, or on all nodes configured in corosync.conf if no nodes are specified.
 .TP
 sync
-Sync corosync configuration to all nodes found from current corosync.conf file (cluster.conf on systems running Corosync 1.x)
-.TP
-quorum unblock
-Cancel waiting for all nodes when establishing quorum.  Useful in situations where you know the cluster is inquorate, but you are confident that the cluster should proceed with resource management regardless.
+Sync corosync configuration to all nodes found from current corosync.conf file (cluster.conf on systems running Corosync 1.x).
 .TP
 cib [filename] [scope=<scope> | \fB\-\-config\fR]
-Get the raw xml from the CIB (Cluster Information Base).  If a filename is provided, we save the cib to that file, otherwise the cib is printed.  Specify scope to get a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults, status.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to get the whole CIB or be warned in t [...]
+Get the raw xml from the CIB (Cluster Information Base).  If a filename is provided, we save the CIB to that file, otherwise the CIB is printed.  Specify scope to get a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults, status.  \fB\-\-config\fR is the same as scope=configuration.  Do not specify a scope if you want to edit the saved CIB using pcs (pcs -f <command>).
 .TP
 cib-push <filename> [scope=<scope> | \fB\-\-config\fR]
 Push the raw xml from <filename> to the CIB (Cluster Information Base).  You can obtain the CIB by running the 'pcs cluster cib' command, which is recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one-off push.  Specify scope to push a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  U [...]
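 
 A minimal sketch of the recommended workflow (the file name and resource id are illustrative):
 
 pcs cluster cib my_changes.xml
 pcs \-f my_changes.xml resource create dummy ocf:heartbeat:Dummy
 pcs cluster cib\-push my_changes.xml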
 .TP
 cib\-upgrade
-Upgrade the CIB to conform to the latest version of the document schema
+Upgrade the CIB to conform to the latest version of the document schema.
 .TP
 edit [scope=<scope> | \fB\-\-config\fR]
 Edit the cib in the editor specified by the $EDITOR environment variable and push out any changes upon saving.  Specify scope to edit a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to edit the whole CIB or be warned in the case of outdated CIB.
@@ -269,119 +269,137 @@ node add <node[,node\-altaddr]> [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\
 Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node.  If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-wait\fR is specified wait up to 'n' seconds for the new node to start.  If \fB\-\-enable\fR is specified enable corosync/pacemaker on new node.  When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 address.
 .TP
 node remove <node>
-Shutdown specified node and remove it from pacemaker and corosync on all other nodes in the cluster
+Shutdown specified node and remove it from pacemaker and corosync on all other nodes in the cluster.
 .TP
 uidgid
-List the current configured uids and gids of users allowed to connect to corosync
+List the current configured uids and gids of users allowed to connect to corosync.
 .TP
 uidgid add [uid=<uid>] [gid=<gid>]
-Add the specified uid and/or gid to the list of users/groups allowed to connect to corosync
+Add the specified uid and/or gid to the list of users/groups allowed to connect to corosync.
 .TP
 uidgid rm [uid=<uid>] [gid=<gid>]
-Remove the specified uid and/or gid from the list of users/groups allowed to connect to corosync
+Remove the specified uid and/or gid from the list of users/groups allowed to connect to corosync.
 .TP
 corosync [node]
-Get the corosync.conf from the specified node or from the current node if node not specified
+Get the corosync.conf from the specified node or from the current node if node not specified.
 .TP
 reload corosync
-Reload the corosync configuration on the current node
+Reload the corosync configuration on the current node.
 .TP
 destroy [\fB\-\-all\fR]
 Permanently destroy the cluster on the current node, killing all corosync/pacemaker processes and removing all cib files and the corosync.conf file.  Using \fB\-\-all\fR will attempt to destroy the cluster on all nodes configured in the corosync.conf file.  WARNING: This command permanently removes any cluster configuration that has been created.  It is recommended to run 'pcs cluster stop' before destroying the cluster.
 .TP
 verify [\fB\-V\fR] [filename]
-Checks the pacemaker configuration (cib) for syntax and common conceptual errors.  If no filename is specified the check is performed on the currently running cluster.  If \fB\-V\fR is used more verbose output will be printed
+Checks the pacemaker configuration (cib) for syntax and common conceptual errors.  If no filename is specified the check is performed on the currently running cluster.  If \fB\-V\fR is used more verbose output will be printed.
 .TP
 report [\fB\-\-from\fR "YYYY\-M\-D H:M:S" [\fB\-\-to\fR "YYYY\-M\-D H:M:S"]] dest
 Create a tarball containing everything needed when reporting cluster problems.  If \fB\-\-from\fR and \fB\-\-to\fR are not used, the report will include the past 24 hours.
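 
 For example (the destination path is illustrative): pcs cluster report \-\-from "2016\-6\-20 10:00:00" /tmp/cluster_report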
 .SS "stonith"
 .TP
 [show [stonith id]] [\fB\-\-full\fR]
-Show all currently configured stonith devices or if a stonith id is specified show the options for the configured stonith device.  If \fB\-\-full\fR is specified all configured stonith options will be displayed
+Show all currently configured stonith devices or if a stonith id is specified show the options for the configured stonith device.  If \fB\-\-full\fR is specified all configured stonith options will be displayed.
 .TP
 list [filter] [\fB\-\-nodesc\fR]
 Show list of all available stonith agents (if filter is provided then only stonith agents matching the filter will be shown). If \fB\-\-nodesc\fR is used then descriptions of stonith agents are not printed.
 .TP
 describe <stonith agent>
-Show options for specified stonith agent
+Show options for specified stonith agent.
 .TP
 create <stonith id> <stonith device type> [stonith device options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...]
-Create stonith device with specified type and options
+Create stonith device with specified type and options.
 .TP
 update <stonith id> [stonith device options]
-Add/Change options to specified stonith id
+Add/Change options to specified stonith id.
 .TP
 delete <stonith id>
-Remove stonith id from configuration
+Remove stonith id from configuration.
 .TP
 cleanup [<stonith id>] [\fB\-\-node\fR <node>]
 Cleans up the stonith device in the lrmd (useful to reset the status and failcount).  This tells the cluster to forget the operation history of a stonith device and re-detect its current state.  This can be useful to purge knowledge of past failures that have since been resolved.  If a stonith id is not specified then all resources/stonith devices will be cleaned up.  If a node is not specified then resources on all nodes will be cleaned up.
 .TP
 level
-Lists all of the fencing levels currently configured
+Lists all of the fencing levels currently configured.
 .TP
 level add <level> <node> <devices>
 Add the fencing level for the specified node with a comma separated list of devices (stonith ids) to attempt for that node at that level.  Fence levels are attempted in numerical order (starting with 1); if a level succeeds (meaning all devices are successfully fenced in that level) then no other levels are tried, and the node is considered fenced.
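 
 For example, to try devices dev_a and dev_b (illustrative stonith ids) first for node1: pcs stonith level add 1 node1 dev_a,dev_b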
 .TP
 level remove <level> [node id] [stonith id] ... [stonith id]
-Removes the fence level for the level, node and/or devices specified If no nodes or devices are specified then the fence level is removed
+Removes the fence level for the level, node and/or devices specified.  If no nodes or devices are specified then the fence level is removed.
 .TP
 level clear [node|stonith id(s)]
 Clears the fence levels on the node (or stonith id) specified or clears all fence levels if a node/stonith id is not specified.  If more than one stonith id is specified they must be separated by a comma and no spaces.  Example: pcs stonith level clear dev_a,dev_b
 .TP
 level verify
-Verifies all fence devices and nodes specified in fence levels exist
+Verifies all fence devices and nodes specified in fence levels exist.
 .TP
 fence <node> [\fB\-\-off\fR]
-Fence the node specified (if \fB\-\-off\fR is specified, use the 'off' API call to stonith which will turn the node off instead of rebooting it)
+Fence the node specified (if \fB\-\-off\fR is specified, use the 'off' API call to stonith which will turn the node off instead of rebooting it).
+.TP
+confirm <node> [\fB\-\-force\fR]
+Confirm that the host specified is currently down.  This command should \fBONLY\fR be used when the node specified has already been confirmed to be powered off and to have no access to shared resources.
+
+.B WARNING: If this node is not actually powered off or it does have access to shared resources, data corruption/cluster failure can occur. To prevent accidental running of this command, \-\-force or interactive user response is required in order to proceed.
+.TP
+sbd enable [\fB\-\-watchdog\fR=<path>[@<node>]] ... [<SBD_OPTION>=<value>] ...
+Enable SBD in the cluster. The default path for the watchdog device is /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5), SBD_DELAY_START (default: no) and SBD_STARTMODE (default: clean).
+
+.B WARNING: The cluster has to be restarted in order to apply these changes.
+
+Example of enabling SBD in a cluster where the watchdog on node1 will be /dev/watchdog2, on node2 /dev/watchdog1, and /dev/watchdog0 on all other nodes, with the watchdog timeout set to 10 seconds:
+
+pcs stonith sbd enable \-\-watchdog=/dev/watchdog2@node1 \-\-watchdog=/dev/watchdog1@node2 \-\-watchdog=/dev/watchdog0 SBD_WATCHDOG_TIMEOUT=10
+
 .TP
-confirm <node>
-Confirm that the host specified is currently down.  This command should \fBONLY\fR be used when the node specified has already been confirmed to be down.
+sbd disable
+Disable SBD in the cluster.
 
-.B WARNING: if this node is not actually down data corruption/cluster failure can occur.
+.B WARNING: The cluster has to be restarted in order to apply these changes.
+.TP
+sbd status
+Show status of SBD services in the cluster.
+.TP
+sbd config
+Show SBD configuration in the cluster.
 .SS "acl"
 .TP
 [show]
-List all current access control lists
+List all current access control lists.
 .TP
 enable
-Enable access control lists
+Enable access control lists.
 .TP
 disable
-Disable access control lists
+Disable access control lists.
 .TP
 role create <role id> [description=<description>] [((read | write | deny) (xpath <query> | id <id>))...]
-Create a role with the id and (optional) description specified.
-Each role can also have an unlimited number of permissions
-(read/write/deny) applied to either an xpath query or the id
-of a specific element in the cib
+Create a role with the id and (optional) description specified.  Each role can also have an unlimited number of permissions (read/write/deny) applied to either an xpath query or the id of a specific element in the CIB.
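+
+For example, a sketch of a role granting write access to all resources (the role name and query are illustrative): pcs acl role create operators description="resource operators" write xpath //resources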
 .TP
 role delete <role id>
-Delete the role specified and remove it from any users/groups it was assigned to
+Delete the role specified and remove it from any users/groups it was assigned to.
 .TP
 role assign <role id> [to] <username/group>
-Assign a role to a user or group already created with 'pcs acl user/group create'
+Assign a role to a user or group already created with 'pcs acl user/group create'.
 .TP
 role unassign <role id> [from] <username/group>
-Remove a role from the specified user
+Remove a role from the specified user.
 .TP
 user create <username> <role id> [<role id>]...
-Create an ACL for the user specified and assign roles to the user
+Create an ACL for the user specified and assign roles to the user.
 .TP
 user delete <username>
-Remove the user specified (and roles assigned will be unassigned for the specified user)
+Remove the user specified (and roles assigned will be unassigned for the specified user).
 .TP
 group create <group> <role id> [<role id>]...
-Create an ACL for the group specified and assign roles to the group
+Create an ACL for the group specified and assign roles to the group.
 .TP
 group delete <group>
-Remove the group specified (and roles assigned will be unassigned for the specified group)
+Remove the group specified (and roles assigned will be unassigned for the specified group).
 .TP
 permission add <role id> ((read | write | deny) (xpath <query> | id <id>))...
-Add the listed permissions to the role specified
+Add the listed permissions to the role specified.
 .TP
 permission delete <permission id>
-Remove the permission id specified (permission id's are listed in parenthesis after permissions in 'pcs acl' output)
+Remove the permission id specified (permission id's are listed in parenthesis after permissions in 'pcs acl' output).
 .SS "property"
 .TP
 [list|show [<property> | \fB\-\-all\fR | \fB\-\-defaults\fR]] | [\fB\-\-all\fR | \fB\-\-defaults\fR]
@@ -398,10 +416,10 @@ Remove property from configuration (or remove attribute from specified node if \
 List all current location, order and colocation constraints, if \fB\-\-full\fR is specified also list the constraint ids.
 .TP
 location <resource id> prefers <node[=score]>...
-Create a location constraint on a resource to prefer the specified node and score (default score: INFINITY)
+Create a location constraint on a resource to prefer the specified node and score (default score: INFINITY).
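+
+For example (resource and node names are illustrative): pcs constraint location MyResource prefers node1=50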
 .TP
 location <resource id> avoids <node[=score]>...
-Create a location constraint on a resource to avoid the specified node and score (default score: INFINITY)
+Create a location constraint on a resource to avoid the specified node and score (default score: INFINITY).
 .TP
 location <resource id> rule [id=<rule id>] [resource-discovery=<option>] [role=master|slave] [constraint\-id=<id>] [score=<score>|score-attribute=<attribute>] <expression>
 Creates a location rule on the specified resource where the expression looks like one of the following:
@@ -424,16 +442,16 @@ Creates a location rule on the specified resource where the expression looks lik
 .br
 where duration options and date spec options are: hours, monthdays, weekdays, yeardays, months, weeks, years, weekyears, moon. If score is omitted it defaults to INFINITY. If id is omitted one is generated from the resource id. If resource-discovery is omitted it defaults to 'always'.
 .TP
-location show [resources|nodes [node id|resource id]...] [\fB\-\-full\fR]
+location [show [resources|nodes [node id|resource id]...] [\fB\-\-full\fR]]
 List all the current location constraints, if 'resources' is specified location constraints are displayed per resource (default), if 'nodes' is specified location constraints are displayed per node.  If specific nodes or resources are specified then we only show information about them.  If \fB\-\-full\fR is specified show the internal constraint id's as well.
 .TP
 location add <id> <resource id> <node> <score> [resource-discovery=<option>]
-Add a location constraint with the appropriate id, resource id, node name and score. (For more advanced pacemaker usage)
+Add a location constraint with the appropriate id, resource id, node name and score. (For more advanced pacemaker usage.)
 .TP
 location remove <id> [<resource id> <node> <score>]
-Remove a location constraint with the appropriate id, resource id, node name and score. (For more advanced pacemaker usage)
+Remove a location constraint with the appropriate id, resource id, node name and score. (For more advanced pacemaker usage.)
 .TP
-order show [\fB\-\-full\fR]
+order [show] [\fB\-\-full\fR]
 List all current ordering constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
 .TP
 order [action] <resource id> then [action] <resource id> [options]
@@ -445,7 +463,7 @@ Create an ordered set of resources. Available options are sequential=true/false,
 order remove <resource1> [resourceN]...
 Remove resource from any ordering constraint.
 .TP
-colocation show [\fB\-\-full\fR]
+colocation [show] [\fB\-\-full\fR]
 List all current colocation constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
 .TP
 colocation add [master|slave] <source resource id> with [master|slave] <target resource id> [score] [options] [id=constraint\-id]
@@ -455,22 +473,22 @@ colocation set <resource1> [resourceN]... [options] [set <resourceX> ... [option
 Create a colocation constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Available constraint_options are id, score, score-attribute and score-attribute-mangle.
 .TP
 colocation remove <source resource id> <target resource id>
-Remove colocation constraints with <source resource>
+Remove colocation constraints with specified resources.
 .TP
-ticket show [\fB\-\-full\fR]
+ticket [show] [\fB\-\-full\fR]
 List all current ticket constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
 .TP
-ticket set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
-Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket. Available constraint option is loss-policy=fence/stop/freeze/demote 
-.TP
 ticket add <ticket> [<role>] <resource id> [options] [id=constraint\-id]
-Crate a ticket constraint for <resource id>. Available option is loss-policy=fence/stop/freeze/demote. A role can be master, slave, started or stopped.
+Create a ticket constraint for <resource id>. Available option is loss-policy=fence/stop/freeze/demote. A role can be master, slave, started or stopped.
+.TP
+ticket set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
+Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket. Optional constraint option is loss-policy=fence/stop/freeze/demote.
 .TP
 remove [constraint id]...
-Remove constraint(s) or constraint rules with the specified id(s)
+Remove constraint(s) or constraint rules with the specified id(s).
 .TP
 ref <resource>...
-List constraints referencing specified resource
+List constraints referencing specified resource.
 .TP
 rule add <constraint id> [id=<rule id>] [role=master|slave] [score=<score>|score-attribute=<attribute>] <expression>
 Add a rule to a constraint where the expression looks like one of the following:
@@ -494,39 +512,66 @@ Add a rule to a constraint where the expression looks like one of the following:
 where duration options and date spec options are: hours, monthdays, weekdays, yeardays, months, weeks, years, weekyears, moon.  If score is omitted it defaults to INFINITY.  If id is omitted one is generated from the constraint id.
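 
 For example, a sketch adding a rule to an existing constraint (the constraint id and node attribute are illustrative): pcs constraint rule add my\-constraint score=500 defined my_attr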
 .TP
 rule remove <rule id>
-Remove a rule if a rule id is specified, if rule is last rule in its constraint, the constraint will be removed
+Remove a rule if a rule id is specified, if rule is last rule in its constraint, the constraint will be removed.
+.SS "qdevice"
+.TP
+setup model <device model> [\fB\-\-enable\fR] [\fB\-\-start\fR]
+Configure specified model of quorum device provider.  The quorum device may then be added to clusters by the "pcs quorum device add" command.  \fB\-\-start\fR will also start the provider.  \fB\-\-enable\fR will configure the provider to start on boot.
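+
+For example, to set up the net model provider and have it start now and on boot: pcs qdevice setup model net \-\-enable \-\-start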
+.TP
+destroy <device model>
+Disable and stop specified model of quorum device provider and delete its configuration files.
+.TP
+start <device model>
+Start specified model of quorum device provider.
+.TP
+stop <device model>
+Stop specified model of quorum device provider.
+.TP
+kill <device model>
+Force specified model of quorum device provider to stop (performs kill \-9).
+.TP
+enable <device model>
+Configure specified model of quorum device provider to start on boot.
+.TP
+disable <device model>
+Configure specified model of quorum device provider to not start on boot.
 .SS "quorum"
 .TP
 config
 Show quorum configuration.
 .TP
 device add [generic options] model <device model> [model options]
-Add quorum device to cluster.
+Add quorum device to cluster.  The quorum device needs to be created first by the "pcs qdevice setup" command.
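+
+For example, a sketch using the net model (the host name, and the model options shown, are illustrative): pcs quorum device add model net host=qnetd.example.com algorithm=ffsplit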
 .TP
 device remove
 Remove quorum device from cluster.
 .TP
 device update [generic options] [model <model options>]
-Add/Change quorum device options.
+Add/Change quorum device options.  Requires the cluster to be stopped.
+.TP
+unblock [\fB\-\-force\fR]
+Cancel waiting for all nodes when establishing quorum.  Useful in situations where you know the cluster is inquorate, but you are confident that the cluster should proceed with resource management regardless.  This command should ONLY be used when the nodes the cluster is waiting for have been confirmed to be powered off and to have no access to shared resources.
+
+.B WARNING: If the nodes are not actually powered off or they do have access to shared resources, data corruption/cluster failure can occur. To prevent accidental running of this command, \-\-force or interactive user response is required in order to proceed.
 .TP
 update [auto_tie_breaker=[0|1]] [last_man_standing=[0|1]] [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]]
-Add/Change quorum options.  At least one option must be specified.  Options are documented in corosync's votequorum(5) man page.
+Add/Change quorum options.  At least one option must be specified.  Options are documented in corosync's votequorum(5) man page.  Requires the cluster to be stopped.
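+
+For example, to enable the wait_for_all option: pcs quorum update wait_for_all=1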
 .SS "status"
 .TP
 [status] [\fB\-\-full\fR | \fB\-\-hide-inactive\fR]
-View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide-inactive\fR hides inactive resources)
+View all information about the cluster and resources (\fB\-\-full\fR provides more details, \fB\-\-hide-inactive\fR hides inactive resources).
 .TP
 resources
-View current status of cluster resources
+View current status of cluster resources.
 .TP
 groups
-View currently configured groups and their resources
+View currently configured groups and their resources.
 .TP
 cluster
-View current cluster status
+View current cluster status.
 .TP
 corosync
-View current membership information as seen by corosync
+View current membership information as seen by corosync.
 .TP
 nodes [corosync|both|config]
 View current status of nodes from pacemaker. If 'corosync' is specified, print nodes currently configured in corosync, if 'both' is specified, print nodes from both corosync & pacemaker.  If 'config' is specified, print nodes from corosync & pacemaker configuration.
@@ -535,11 +580,11 @@ pcsd [<node>] ...
 Show the current status of pcsd on the specified nodes. When no nodes are specified, status of all nodes is displayed.
 .TP
 xml
-View xml version of status (output from crm_mon \fB\-r\fR \fB\-1\fR \fB\-X\fR)
+View xml version of status (output from crm_mon \fB\-r\fR \fB\-1\fR \fB\-X\fR).
 .SS "config"
 .TP
 [show]
-View full cluster configuration
+View full cluster configuration.
 .TP
 backup [filename]
 Creates the tarball containing the cluster configuration files.  If filename is not specified the standard output will be used.
diff --git a/pcs/qdevice.py b/pcs/qdevice.py
new file mode 100644
index 0000000..1f06709
--- /dev/null
+++ b/pcs/qdevice.py
@@ -0,0 +1,89 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import sys
+
+from pcs import (
+    usage,
+    utils,
+)
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.lib.errors import LibraryError
+
+def qdevice_cmd(lib, argv, modifiers):
+    if len(argv) < 1:
+        usage.qdevice()
+        sys.exit(1)
+
+    sub_cmd, argv_next = argv[0], argv[1:]
+    try:
+        if sub_cmd == "help":
+            usage.qdevice(argv)
+        elif sub_cmd == "setup":
+            qdevice_setup_cmd(lib, argv_next, modifiers)
+        elif sub_cmd == "destroy":
+            qdevice_destroy_cmd(lib, argv_next, modifiers)
+        elif sub_cmd == "start":
+            qdevice_start_cmd(lib, argv_next, modifiers)
+        elif sub_cmd == "stop":
+            qdevice_stop_cmd(lib, argv_next, modifiers)
+        elif sub_cmd == "kill":
+            qdevice_kill_cmd(lib, argv_next, modifiers)
+        elif sub_cmd == "enable":
+            qdevice_enable_cmd(lib, argv_next, modifiers)
+        elif sub_cmd == "disable":
+            qdevice_disable_cmd(lib, argv_next, modifiers)
+        else:
+            raise CmdLineInputError()
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(e, "qdevice", sub_cmd)
+
+def qdevice_setup_cmd(lib, argv, modifiers):
+    if len(argv) != 2:
+        raise CmdLineInputError()
+    if argv[0] != "model":
+        raise CmdLineInputError()
+    model = argv[1]
+    lib.qdevice.setup(model, modifiers["enable"], modifiers["start"])
+
+def qdevice_destroy_cmd(lib, argv, modifiers):
+    if len(argv) != 1:
+        raise CmdLineInputError()
+    model = argv[0]
+    lib.qdevice.destroy(model)
+
+def qdevice_start_cmd(lib, argv, modifiers):
+    if len(argv) != 1:
+        raise CmdLineInputError()
+    model = argv[0]
+    lib.qdevice.start(model)
+
+def qdevice_stop_cmd(lib, argv, modifiers):
+    if len(argv) != 1:
+        raise CmdLineInputError()
+    model = argv[0]
+    lib.qdevice.stop(model)
+
+def qdevice_kill_cmd(lib, argv, modifiers):
+    if len(argv) != 1:
+        raise CmdLineInputError()
+    model = argv[0]
+    lib.qdevice.kill(model)
+
+def qdevice_enable_cmd(lib, argv, modifiers):
+    if len(argv) != 1:
+        raise CmdLineInputError()
+    model = argv[0]
+    lib.qdevice.enable(model)
+
+def qdevice_disable_cmd(lib, argv, modifiers):
+    if len(argv) != 1:
+        raise CmdLineInputError()
+    model = argv[0]
+    lib.qdevice.disable(model)
diff --git a/pcs/quorum.py b/pcs/quorum.py
index a63a0b4..f793a21 100644
--- a/pcs/quorum.py
+++ b/pcs/quorum.py
@@ -11,6 +11,7 @@ from pcs import (
     usage,
     utils,
 )
+from pcs.cluster import cluster_quorum_unblock
 from pcs.cli.common import parse_args
 from pcs.cli.common.console_report import indent
 from pcs.cli.common.errors import CmdLineInputError
@@ -29,6 +30,8 @@ def quorum_cmd(lib, argv, modificators):
             quorum_config_cmd(lib, argv_next, modificators)
         elif sub_cmd == "device":
             quorum_device_cmd(lib, argv_next, modificators)
+        elif sub_cmd == "unblock":
+            cluster_quorum_unblock(argv_next)
         elif sub_cmd == "update":
             quorum_update_cmd(lib, argv_next, modificators)
         else:
diff --git a/pcs/resource.py b/pcs/resource.py
index 0dfdb03..284bdb2 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -23,6 +23,7 @@ from pcs import (
 )
 import pcs.lib.cib.acl as lib_acl
 import pcs.lib.pacemaker as lib_pacemaker
+from pcs.lib.external import get_systemd_services
 from pcs.cli.common.errors import CmdLineInputError
 from pcs.cli.common.parse_args import prepare_options
 from pcs.lib.errors import LibraryError
@@ -278,7 +279,7 @@ def resource_list_available(argv):
                     full_res_name,
                     lib_ra.get_agent_desc(metadata)["shortdesc"]
                 ))
-            except LibraryError:
+            except (LibraryError, lib_ra.ResourceAgentLibError):
                 pass
 
     # lsb agents
@@ -289,13 +290,8 @@ def resource_list_available(argv):
             ret.append("lsb:" + agent)
 
     # systemd agents
-    if utils.is_systemctl():
-        agents, dummy_retval = utils.run(["systemctl", "list-unit-files", "--full"])
-        agents = agents.split("\n")
-    for agent in agents:
-        match = re.search(r'^([\S]*)\.service',agent)
-        if match:
-            ret.append("systemd:" + match.group(1))
+    for service in get_systemd_services(utils.cmd_runner()):
+        ret.append("systemd:{0}".format(service))
 
     # nagios metadata
     if os.path.isdir(settings.nagios_metadata_path):
@@ -317,7 +313,7 @@ def resource_list_available(argv):
                     full_res_name,
                     lib_ra.get_agent_desc(metadata)["shortdesc"]
                 ))
-            except LibraryError:
+            except (LibraryError, lib_ra.ResourceAgentLibError):
                 pass
 
     # output
@@ -386,6 +382,10 @@ def resource_list_options(resource):
         return
     except lib_ra.UnsupportedResourceAgent:
         pass
+    except lib_ra.ResourceAgentLibError as e:
+        utils.process_library_reports(
+            [lib_ra.resource_agent_lib_error_to_report_item(e)]
+        )
     except LibraryError as e:
         utils.process_library_reports(e.args)
 
@@ -401,7 +401,7 @@ def resource_list_options(resource):
             descriptions, parameters = get_desc_params(agent)
             resource_print_options(agent, descriptions, parameters)
             return
-        except LibraryError:
+        except (LibraryError, lib_ra.ResourceAgentLibError):
             pass
 
     # still not found, now lets look at nagios plugins
@@ -410,7 +410,7 @@ def resource_list_options(resource):
             agent = "nagios:" + resource
             descriptions, parameters = get_desc_params(agent)
             resource_print_options(agent, descriptions, parameters)
-        except LibraryError:
+        except (LibraryError, lib_ra.ResourceAgentLibError):
             utils.err("Unable to find resource: {0}".format(resource))
 
 # Return the string formatted with a line length of 79 and indented
@@ -558,6 +558,10 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
                 dict(params),
                 get_full_ra_type(ra_type, True)
             )
+        except lib_ra.ResourceAgentLibError as e:
+            utils.process_library_reports(
+                [lib_ra.resource_agent_lib_error_to_report_item(e)]
+            )
         except LibraryError as e:
             utils.process_library_reports(e.args)
         if len(bad_opts) != 0:
@@ -896,6 +900,10 @@ def resource_update(res_id,args):
             bad_opts, _ = lib_ra.validate_instance_attributes(
                 utils.cmd_runner(), dict(params), resource_type
             )
+        except lib_ra.ResourceAgentLibError as e:
+            utils.process_library_reports(
+                [lib_ra.resource_agent_lib_error_to_report_item(e)]
+            )
         except LibraryError as e:
             utils.process_library_reports(e.args)
         if len(bad_opts) != 0:
@@ -2833,5 +2841,9 @@ def get_resource_agent_info(argv):
         )
 
         print(json.dumps(metadata))
-    except lib_ra.LibraryError as e:
+    except lib_ra.ResourceAgentLibError as e:
+        utils.process_library_reports(
+            [lib_ra.resource_agent_lib_error_to_report_item(e)]
+        )
+    except LibraryError as e:
         utils.process_library_reports(e.args)
diff --git a/pcs/settings_default.py b/pcs/settings_default.py
index 6b9c728..3acd8e0 100644
--- a/pcs/settings_default.py
+++ b/pcs/settings_default.py
@@ -3,14 +3,24 @@ import os.path
 pacemaker_binaries = "/usr/sbin/"
 corosync_binaries = "/usr/sbin/"
 ccs_binaries = "/usr/sbin/"
-corosync_conf_file = "/etc/corosync/corosync.conf"
+corosync_conf_dir = "/etc/corosync/"
+corosync_conf_file = os.path.join(corosync_conf_dir, "corosync.conf")
+corosync_uidgid_dir = os.path.join(corosync_conf_dir, "uidgid.d/")
+corosync_qdevice_net_server_certs_dir = os.path.join(
+    corosync_conf_dir,
+    "qdevice/net/qnetd/nssdb"
+)
+corosync_qdevice_net_client_certs_dir = os.path.join(
+    corosync_conf_dir,
+    "qdevice/net/node/nssdb"
+)
 cluster_conf_file = "/etc/cluster/cluster.conf"
 fence_agent_binaries = "/usr/sbin/"
 pengine_binary = "/usr/libexec/pacemaker/pengine"
 crmd_binary = "/usr/libexec/pacemaker/crmd"
 cib_binary = "/usr/libexec/pacemaker/cib"
 stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.151"
+pcs_version = "0.9.152"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
 crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
@@ -20,10 +30,11 @@ pcsd_tokens_location = "/var/lib/pcsd/tokens"
 pcsd_users_conf_location = "/var/lib/pcsd/pcs_users.conf"
 pcsd_settings_conf_location = "/var/lib/pcsd/pcs_settings.conf"
 pcsd_exec_location = "/usr/lib/pcsd/"
-corosync_uidgid_dir = "/etc/corosync/uidgid.d/"
 cib_dir = "/var/lib/pacemaker/cib/"
 pacemaker_uname = "hacluster"
 pacemaker_gname = "haclient"
 ocf_root = "/usr/lib/ocf/"
 ocf_resources = os.path.join(ocf_root, "resource.d/")
 nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata/"
+sbd_watchdog_default = "/dev/watchdog"
+sbd_config = "/etc/sysconfig/sbd"
diff --git a/pcs/stonith.py b/pcs/stonith.py
index 5937323..ab9e926 100644
--- a/pcs/stonith.py
+++ b/pcs/stonith.py
@@ -16,62 +16,63 @@ from pcs import (
     usage,
     utils,
 )
+from pcs.cli.common import parse_args
 from pcs.cli.common.errors import CmdLineInputError
-from pcs.lib.errors import LibraryError
+from pcs.lib.errors import LibraryError, ReportItemSeverity
 import pcs.lib.resource_agent as lib_ra
 
 def stonith_cmd(argv):
+    lib = utils.get_library_wrapper()
+    modifiers = utils.get_modificators()
     if len(argv) == 0:
         argv = ["show"]
 
     sub_cmd = argv.pop(0)
-    if (sub_cmd == "help"):
-        usage.stonith(argv)
-    elif (sub_cmd == "list"):
-        stonith_list_available(argv)
-    elif (sub_cmd == "describe"):
-        if len(argv) == 1:
-            stonith_list_options(argv[0])
-        else:
-            usage.stonith()
-            sys.exit(1)
-    elif (sub_cmd == "create"):
-        stonith_create(argv)
-    elif (sub_cmd == "update"):
-        if len(argv) > 1:
-            stn_id = argv.pop(0)
-            resource.resource_update(stn_id,argv)
-        else:
-            usage.stonith(["update"])
-            sys.exit(1)
-    elif (sub_cmd == "delete"):
-        if len(argv) == 1:
-            stn_id = argv.pop(0)
-            resource.resource_remove(stn_id)
-        else:
-            usage.stonith(["delete"])
-            sys.exit(1)
-    elif (sub_cmd == "show"):
-        resource.resource_show(argv, True)
-        stonith_level([])
-    elif (sub_cmd == "level"):
-        stonith_level(argv)
-    elif (sub_cmd == "fence"):
-        stonith_fence(argv)
-    elif (sub_cmd == "cleanup"):
-        try:
+    try:
+        if (sub_cmd == "help"):
+            usage.stonith(argv)
+        elif (sub_cmd == "list"):
+            stonith_list_available(argv)
+        elif (sub_cmd == "describe"):
+            if len(argv) == 1:
+                stonith_list_options(argv[0])
+            else:
+                raise CmdLineInputError()
+        elif (sub_cmd == "create"):
+            stonith_create(argv)
+        elif (sub_cmd == "update"):
+            if len(argv) > 1:
+                stn_id = argv.pop(0)
+                resource.resource_update(stn_id,argv)
+            else:
+                raise CmdLineInputError()
+        elif (sub_cmd == "delete"):
+            if len(argv) == 1:
+                stn_id = argv.pop(0)
+                resource.resource_remove(stn_id)
+            else:
+                raise CmdLineInputError()
+        elif (sub_cmd == "show"):
+            resource.resource_show(argv, True)
+            stonith_level([])
+        elif (sub_cmd == "level"):
+            stonith_level(argv)
+        elif (sub_cmd == "fence"):
+            stonith_fence(argv)
+        elif (sub_cmd == "cleanup"):
             resource.resource_cleanup(argv)
-        except CmdLineInputError as e:
-            utils.exit_on_cmdline_input_errror(e, "stonith", 'cleanup')
-        except LibraryError as e:
-            utils.process_library_reports(e.args)
-    elif (sub_cmd == "confirm"):
-        stonith_confirm(argv)
-    elif (sub_cmd == "get_fence_agent_info"):
-        get_fence_agent_info(argv)
-    else:
-        usage.stonith()
-        sys.exit(1)
+        elif (sub_cmd == "confirm"):
+            stonith_confirm(argv)
+        elif (sub_cmd == "get_fence_agent_info"):
+            get_fence_agent_info(argv)
+        elif (sub_cmd == "sbd"):
+            sbd_cmd(lib, argv, modifiers)
+        else:
+            raise CmdLineInputError()
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(e, "stonith", sub_cmd)
 
 def stonith_list_available(argv):
     if len(argv) != 0:
@@ -110,6 +111,12 @@ def stonith_list_available(argv):
                     sd = " - " + resource.format_desc(
                         len(agent_name) + 3, shortdesc
                     )
+            except lib_ra.ResourceAgentLibError as e:
+                utils.process_library_reports([
+                    lib_ra.resource_agent_lib_error_to_report_item(
+                        e, ReportItemSeverity.WARNING
+                    )
+                ])
             except LibraryError as e:
                 utils.err(
                     e.args[-1].message, False
@@ -124,6 +131,10 @@ def stonith_list_options(stonith_agent):
         desc = lib_ra.get_agent_desc(metadata)
         params = lib_ra.get_fence_agent_parameters(runner, metadata)
         resource.resource_print_options(stonith_agent, desc, params)
+    except lib_ra.ResourceAgentLibError as e:
+        utils.process_library_reports(
+            [lib_ra.resource_agent_lib_error_to_report_item(e)]
+        )
     except LibraryError as e:
         utils.process_library_reports(e.args)
 
@@ -148,6 +159,17 @@ def stonith_create(argv):
                 meta for meta in meta_values if not meta.startswith("provides=")
             ]
             meta_values.append("provides=unfencing")
+    except lib_ra.ResourceAgentLibError as e:
+        forced = utils.get_modificators().get("force", False)
+        if forced:
+            severity = ReportItemSeverity.WARNING
+        else:
+            severity = ReportItemSeverity.ERROR
+        utils.process_library_reports([
+            lib_ra.resource_agent_lib_error_to_report_item(
+                e, severity, not forced
+            )
+        ])
     except LibraryError as e:
         utils.process_library_reports(e.args)
 
@@ -352,11 +374,23 @@ def stonith_fence(argv):
     else:
         print("Node: %s fenced" % node)
 
-def stonith_confirm(argv):
+def stonith_confirm(argv, skip_question=False):
     if len(argv) != 1:
         utils.err("must specify one (and only one) node to confirm fenced")
 
     node = argv.pop(0)
+    if not skip_question and "--force" not in utils.pcs_options:
+        answer = utils.get_terminal_input(
+            (
+                "WARNING: If node {node} is not powered off or it does"
+                + " have access to shared resources, data corruption and/or"
+                + " cluster failure may occur. Are you sure you want to"
+                + " continue? [y/N] "
+            ).format(node=node)
+        )
+        if answer.lower() not in ["y", "yes"]:
+            print("Canceled")
+            return
     args = ["stonith_admin", "-C", node]
     output, retval = utils.run(args)
 
@@ -400,5 +434,133 @@ def get_fence_agent_info(argv):
         )
 
         print(json.dumps(metadata))
-    except lib_ra.LibraryError as e:
+    except lib_ra.ResourceAgentLibError as e:
+        utils.process_library_reports(
+            [lib_ra.resource_agent_lib_error_to_report_item(e)]
+        )
+    except LibraryError as e:
         utils.process_library_reports(e.args)
+
+
+def sbd_cmd(lib, argv, modifiers):
+    if len(argv) == 0:
+        raise CmdLineInputError()
+    cmd = argv.pop(0)
+    try:
+        if cmd == "enable":
+            sbd_enable(lib, argv, modifiers)
+        elif cmd == "disable":
+            sbd_disable(lib, argv, modifiers)
+        elif cmd == "status":
+            sbd_status(lib, argv, modifiers)
+        elif cmd == "config":
+            sbd_config(lib, argv, modifiers)
+        elif cmd == "local_config_in_json":
+            local_sbd_config(lib, argv, modifiers)
+        else:
+            raise CmdLineInputError()
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(
+            e, "stonith", "sbd {0}".format(cmd)
+        )
+
+
+def sbd_enable(lib, argv, modifiers):
+    sbd_cfg = parse_args.prepare_options(argv)
+    default_watchdog, watchdog_dict = _sbd_parse_watchdogs(
+        modifiers["watchdog"]
+    )
+    lib.sbd.enable_sbd(
+        default_watchdog,
+        watchdog_dict,
+        sbd_cfg,
+        allow_unknown_opts=modifiers["force"],
+        ignore_offline_nodes=modifiers["skip_offline_nodes"]
+    )
+
+
+def _sbd_parse_watchdogs(watchdog_list):
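+    # Illustrative mapping: ["/dev/watchdog0", "/dev/watchdog2@node1"]
+    # yields ("/dev/watchdog0", {"node1": "/dev/watchdog2"}).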
+    default_watchdog = None
+    watchdog_dict = {}
+
+    for watchdog_node in watchdog_list:
+        if "@" not in watchdog_node:
+            if default_watchdog:
+                raise CmdLineInputError("Multiple default watchdogs.")
+            default_watchdog = watchdog_node
+        else:
+            watchdog, node_name = watchdog_node.rsplit("@", 1)
+            if node_name in watchdog_dict:
+                raise CmdLineInputError(
+                    "Multiple watchdog definitions for node '{node}'".format(
+                        node=node_name
+                    )
+                )
+            watchdog_dict[node_name] = watchdog
+
+    return default_watchdog, watchdog_dict
+
+
+def sbd_disable(lib, argv, modifiers):
+    if argv:
+        raise CmdLineInputError()
+
+    lib.sbd.disable_sbd(modifiers["skip_offline_nodes"])
+
+
+def sbd_status(lib, argv, modifiers):
+    def _bool_to_str(val):
+        if val is None:
+            return "N/A"
+        return "YES" if val else " NO"
+
+    if argv:
+        raise CmdLineInputError()
+
+    status_list = lib.sbd.get_cluster_sbd_status()
+    if not len(status_list):
+        utils.err("Unable to get SBD status from any node.")
+
+    print("SBD STATUS")
+    print("<node name>: <installed> | <enabled> | <running>")
+    for node_status in status_list:
+        status = node_status["status"]
+        print("{node}: {installed} | {enabled} | {running}".format(
+            node=node_status["node"].label,
+            installed=_bool_to_str(status.get("installed")),
+            enabled=_bool_to_str(status.get("enabled")),
+            running=_bool_to_str(status.get("running"))
+        ))
+
+
+def sbd_config(lib, argv, modifiers):
+    if argv:
+        raise CmdLineInputError()
+
+    config_list = lib.sbd.get_cluster_sbd_config()
+
+    if not config_list:
+        utils.err("No config obtained.")
+
+    config = config_list[0]["config"]
+
+    filtered_options = ["SBD_WATCHDOG_DEV", "SBD_OPTS"]
+    for key, val in config.items():
+        if key in filtered_options:
+            continue
+        print("{key}={val}".format(key=key, val=val))
+
+    print()
+    print("Watchdogs:")
+    for config in config_list:
+        watchdog = "<unknown>"
+        if config["config"] is not None:
+            watchdog = config["config"].get("SBD_WATCHDOG_DEV", "<unknown>")
+        print("  {node}: {watchdog}".format(
+            node=config["node"].label,
+            watchdog=watchdog
+        ))
+
+
+def local_sbd_config(lib, argv, modifiers):
+    print(json.dumps(lib.sbd.get_local_sbd_config()))
diff --git a/pcs/test/suite.py b/pcs/test/suite.py
index 696c699..85dd20c 100755
--- a/pcs/test/suite.py
+++ b/pcs/test/suite.py
@@ -33,11 +33,36 @@ def prepare_test_name(test_name):
     """
     return test_name.replace("/", ".")
 
-def discover_tests(test_name_list):
-    loader = unittest.TestLoader()
-    if test_name_list:
-        return loader.loadTestsFromNames(map(prepare_test_name, test_name_list))
-    return loader.discover(PACKAGE_DIR, pattern='test_*.py')
+def tests_from_suite(test_candidate):
+    if isinstance(test_candidate, unittest.TestCase):
+        return [test_candidate.id()]
+    test_id_list = []
+    for test in test_candidate:
+        test_id_list.extend(tests_from_suite(test))
+    return test_id_list
+
+def autodiscover_tests():
+    #...Find all the test modules by recursing into subdirectories from the
+    #specified start directory...
+    #...All test modules must be importable from the top level of the project.
+    #If the start directory is not the top level directory then the top level
+    #directory must be specified separately...
+    #So tests are loaded from PACKAGE_DIR/pcs but their names start with "pcs."
+    return unittest.TestLoader().discover(
+        start_dir=os.path.join(PACKAGE_DIR, "pcs"),
+        pattern='test_*.py',
+        top_level_dir=PACKAGE_DIR,
+    )
+
+def discover_tests(explicitly_enumerated_tests, exclude_enumerated_tests=False):
+    if not explicitly_enumerated_tests:
+        return autodiscover_tests()
+    if exclude_enumerated_tests:
+        return unittest.TestLoader().loadTestsFromNames([
+            test_name for test_name in tests_from_suite(autodiscover_tests())
+            if test_name not in explicitly_enumerated_tests
+        ])
+    return unittest.TestLoader().loadTestsFromNames(explicitly_enumerated_tests)
 
 def run_tests(tests, verbose=False, color=False):
     resultclass = unittest.runner.TextTestResult
@@ -52,11 +77,16 @@ def run_tests(tests, verbose=False, color=False):
     testRunner.run(tests)
 
 put_package_to_path()
-tests = discover_tests([
-    arg for arg in sys.argv[1:] if arg not in ("-v", "--color", "--no-color")
-])
+explicitly_enumerated_tests = [
+    prepare_test_name(arg) for arg in sys.argv[1:] if arg not in (
+        "-v",
+        "--color",
+        "--no-color",
+        "--all-but",
+    )
+]
 run_tests(
-    tests,
+    discover_tests(explicitly_enumerated_tests, "--all-but" in sys.argv),
     verbose="-v" in sys.argv,
     color=(
         "--color" in sys.argv
@@ -80,8 +110,11 @@ run_tests(
 #
 # run specific test:
 # IMPORTANT: in 2.6 module.class.method doesn't work but module.class works fine
-# pcs/test/suite.py test_acl.ACLTest -v
-# pcs/test/suite.py test_acl.ACLTest.testAutoUpgradeofCIB
+# pcs/test/suite.py pcs.test.test_acl.ACLTest -v
+# pcs/test/suite.py pcs.test.test_acl.ACLTest.testAutoUpgradeofCIB
+#
+# run all tests except some:
+# pcs/test/suite.py pcs.test.test_acl.ACLTest --all-but
 #
 # for colored test report
 # pcs/test/suite.py --color
diff --git a/pcs/test/test_common_tools.py b/pcs/test/test_common_tools.py
new file mode 100644
index 0000000..5c8482e
--- /dev/null
+++ b/pcs/test/test_common_tools.py
@@ -0,0 +1,65 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+import time
+
+from pcs.common import tools
+
+
+class TestException(Exception):
+    pass
+
+
+class SimpleCacheTestCase(TestCase):
+    def test_called_only_once(self):
+        counter = []
+
+        @tools.simple_cache
+        def adder():
+            counter.append(None)
+            return len(counter)
+
+        self.assertEqual(1, adder())
+        self.assertEqual(1, len(counter))
+        self.assertEqual(1, adder())
+        self.assertEqual(1, len(counter))
+        counter.append(None)
+        self.assertEqual(1, adder())
+        self.assertEqual(2, len(counter))
+
+    def test_exception_not_cached(self):
+        counter = []
+
+        @tools.simple_cache
+        def adder():
+            counter.append(None)
+            raise TestException()
+
+        self.assertRaises(TestException, adder)
+        self.assertEqual(1, len(counter))
+        self.assertRaises(TestException, adder)
+        self.assertEqual(2, len(counter))
+
+
+class RunParallelTestCase(TestCase):
+    def test_run_all(self):
+        data_list = [([i], {}) for i in range(5)]
+        out_list = []
+        tools.run_parallel(out_list.append, data_list)
+        self.assertEqual(sorted(out_list), [i for i in range(5)])
+
+    def test_parallelism(self):
+        x = 5
+        data_list = [[[i + 1], {}] for i in range(x)]
+        start_time = time.time()
+        # this should last at least x seconds, but less than the sum of all sleep times
+        tools.run_parallel(time.sleep, data_list)
+        finish_time = time.time()
+        elapsed_time = finish_time - start_time
+        self.assertTrue(elapsed_time > x)
+        self.assertTrue(elapsed_time < sum([i + 1 for i in range(x)]))
diff --git a/pcs/test/test_lib_commands_qdevice.py b/pcs/test/test_lib_commands_qdevice.py
new file mode 100644
index 0000000..3900c1d
--- /dev/null
+++ b/pcs/test/test_lib_commands_qdevice.py
@@ -0,0 +1,759 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+import logging
+
+from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_report_item_list_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+
+from pcs.common import report_codes
+from pcs.lib.env import LibraryEnvironment
+from pcs.lib.errors import LibraryError, ReportItemSeverity as severity
+from pcs.lib.external import (
+    DisableServiceError,
+    EnableServiceError,
+    StartServiceError,
+    StopServiceError,
+    KillServicesError,
+)
+
+import pcs.lib.commands.qdevice as lib
+
+
+class QdeviceTestCase(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+
+ at mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+class QdeviceDisabledOnCmanTest(QdeviceTestCase):
+    def base_test(self, func):
+        assert_raise_library_error(
+            func,
+            (
+                severity.ERROR,
+                report_codes.CMAN_UNSUPPORTED_COMMAND,
+                {}
+            )
+        )
+
+    def test_setup(self):
+        self.base_test(
+            lambda: lib.qdevice_setup(self.lib_env, "bad model", False, False)
+        )
+
+    def test_destroy(self):
+        self.base_test(
+            lambda: lib.qdevice_destroy(self.lib_env, "bad model")
+        )
+
+    def test_enable(self):
+        self.base_test(
+            lambda: lib.qdevice_enable(self.lib_env, "bad model")
+        )
+
+    def test_disable(self):
+        self.base_test(
+            lambda: lib.qdevice_disable(self.lib_env, "bad model")
+        )
+
+    def test_start(self):
+        self.base_test(
+            lambda: lib.qdevice_start(self.lib_env, "bad model")
+        )
+
+    def test_stop(self):
+        self.base_test(
+            lambda: lib.qdevice_stop(self.lib_env, "bad model")
+        )
+
+    def test_kill(self):
+        self.base_test(
+            lambda: lib.qdevice_kill(self.lib_env, "bad model")
+        )
+
+
+ at mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+class QdeviceBadModelTest(QdeviceTestCase):
+    def base_test(self, func):
+        assert_raise_library_error(
+            func,
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "model",
+                    "option_value": "bad model",
+                    "allowed_values": ["net"],
+                }
+            )
+        )
+
+    def test_setup(self):
+        self.base_test(
+            lambda: lib.qdevice_setup(self.lib_env, "bad model", False, False)
+        )
+
+    def test_destroy(self):
+        self.base_test(
+            lambda: lib.qdevice_destroy(self.lib_env, "bad model")
+        )
+
+    def test_enable(self):
+        self.base_test(
+            lambda: lib.qdevice_enable(self.lib_env, "bad model")
+        )
+
+    def test_disable(self):
+        self.base_test(
+            lambda: lib.qdevice_disable(self.lib_env, "bad model")
+        )
+
+    def test_start(self):
+        self.base_test(
+            lambda: lib.qdevice_start(self.lib_env, "bad model")
+        )
+
+    def test_stop(self):
+        self.base_test(
+            lambda: lib.qdevice_stop(self.lib_env, "bad model")
+        )
+
+    def test_kill(self):
+        self.base_test(
+            lambda: lib.qdevice_kill(self.lib_env, "bad model")
+        )
+
+
+ at mock.patch("pcs.lib.external.start_service")
+ at mock.patch("pcs.lib.external.enable_service")
+ at mock.patch("pcs.lib.commands.qdevice.qdevice_net.qdevice_setup")
+ at mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ at mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class QdeviceNetSetupTest(QdeviceTestCase):
+    def test_success(self, mock_net_setup, mock_net_enable, mock_net_start):
+        lib.qdevice_setup(self.lib_env, "net", False, False)
+
+        mock_net_setup.assert_called_once_with("mock_runner")
+        mock_net_enable.assert_not_called()
+        mock_net_start.assert_not_called()
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.QDEVICE_INITIALIZATION_SUCCESS,
+                    {
+                        "model": "net",
+                    }
+                )
+            ]
+        )
+
+    def test_start_enable_success(
+        self, mock_net_setup, mock_net_enable, mock_net_start
+    ):
+        lib.qdevice_setup(self.lib_env, "net", True, True)
+
+        mock_net_setup.assert_called_once_with("mock_runner")
+        mock_net_enable.assert_called_once_with("mock_runner", "corosync-qnetd")
+        mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd")
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.QDEVICE_INITIALIZATION_SUCCESS,
+                    {
+                        "model": "net",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_ENABLE_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_START_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_START_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+    def test_init_failed(
+        self, mock_net_setup, mock_net_enable, mock_net_start
+    ):
+        mock_net_setup.side_effect = LibraryError("mock_report_item")
+        self.assertRaises(
+            LibraryError,
+            lambda: lib.qdevice_setup(self.lib_env, "net", False, False)
+        )
+        mock_net_setup.assert_called_once_with("mock_runner")
+        mock_net_enable.assert_not_called()
+        mock_net_start.assert_not_called()
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            []
+        )
+
+    def test_enable_failed(
+        self, mock_net_setup, mock_net_enable, mock_net_start
+    ):
+        mock_net_enable.side_effect = EnableServiceError(
+            "test service",
+            "test error"
+        )
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_setup(self.lib_env, "net", True, True),
+            (
+                severity.ERROR,
+                report_codes.SERVICE_ENABLE_ERROR,
+                {
+                    "service": "test service",
+                    "reason": "test error",
+                }
+            )
+        )
+
+        mock_net_setup.assert_called_once_with("mock_runner")
+        mock_net_enable.assert_called_once_with("mock_runner", "corosync-qnetd")
+        mock_net_start.assert_not_called()
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.QDEVICE_INITIALIZATION_SUCCESS,
+                    {
+                        "model": "net",
+                    }
+                )
+            ]
+        )
+
+    def test_start_failed(
+        self, mock_net_setup, mock_net_enable, mock_net_start
+    ):
+        mock_net_start.side_effect = StartServiceError(
+            "test service",
+            "test error"
+        )
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_setup(self.lib_env, "net", True, True),
+            (
+                severity.ERROR,
+                report_codes.SERVICE_START_ERROR,
+                {
+                    "service": "test service",
+                    "reason": "test error",
+                }
+            )
+        )
+
+        mock_net_setup.assert_called_once_with("mock_runner")
+        mock_net_enable.assert_called_once_with("mock_runner", "corosync-qnetd")
+        mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd")
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.QDEVICE_INITIALIZATION_SUCCESS,
+                    {
+                        "model": "net",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_ENABLE_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_START_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+
+ at mock.patch("pcs.lib.external.stop_service")
+ at mock.patch("pcs.lib.external.disable_service")
+ at mock.patch("pcs.lib.commands.qdevice.qdevice_net.qdevice_destroy")
+ at mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ at mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class QdeviceNetDestroyTest(QdeviceTestCase):
+    def test_success(self, mock_net_destroy, mock_net_disable, mock_net_stop):
+        lib.qdevice_destroy(self.lib_env, "net")
+
+        mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
+        mock_net_disable.assert_called_once_with(
+            "mock_runner",
+            "corosync-qnetd"
+        )
+        mock_net_destroy.assert_called_once_with()
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_DISABLE_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.QDEVICE_DESTROY_SUCCESS,
+                    {
+                        "model": "net",
+                    }
+                )
+            ]
+        )
+
+    def test_stop_failed(
+        self, mock_net_destroy, mock_net_disable, mock_net_stop
+    ):
+        mock_net_stop.side_effect = StopServiceError(
+            "test service",
+            "test error"
+        )
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_destroy(self.lib_env, "net"),
+            (
+                severity.ERROR,
+                report_codes.SERVICE_STOP_ERROR,
+                {
+                    "service": "test service",
+                    "reason": "test error",
+                }
+            )
+        )
+
+        mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
+        mock_net_disable.assert_not_called()
+        mock_net_destroy.assert_not_called()
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+    def test_disable_failed(
+        self, mock_net_destroy, mock_net_disable, mock_net_stop
+    ):
+        mock_net_disable.side_effect = DisableServiceError(
+            "test service",
+            "test error"
+        )
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_destroy(self.lib_env, "net"),
+            (
+                severity.ERROR,
+                report_codes.SERVICE_DISABLE_ERROR,
+                {
+                    "service": "test service",
+                    "reason": "test error",
+                }
+            )
+        )
+
+        mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
+        mock_net_disable.assert_called_once_with(
+            "mock_runner",
+            "corosync-qnetd"
+        )
+        mock_net_destroy.assert_not_called()
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+    def test_destroy_failed(
+        self, mock_net_destroy, mock_net_disable, mock_net_stop
+    ):
+        mock_net_destroy.side_effect = LibraryError("mock_report_item")
+
+        self.assertRaises(
+            LibraryError,
+            lambda: lib.qdevice_destroy(self.lib_env, "net")
+        )
+
+        mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
+        mock_net_disable.assert_called_once_with(
+            "mock_runner",
+            "corosync-qnetd"
+        )
+        mock_net_destroy.assert_called_once_with()
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_DISABLE_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+
+ at mock.patch("pcs.lib.external.enable_service")
+ at mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ at mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class QdeviceNetEnableTest(QdeviceTestCase):
+    def test_success(self, mock_net_enable):
+        lib.qdevice_enable(self.lib_env, "net")
+        mock_net_enable.assert_called_once_with("mock_runner", "corosync-qnetd")
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_ENABLE_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+    def test_failed(self, mock_net_enable):
+        mock_net_enable.side_effect = EnableServiceError(
+            "test service",
+            "test error"
+        )
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_enable(self.lib_env, "net"),
+            (
+                severity.ERROR,
+                report_codes.SERVICE_ENABLE_ERROR,
+                {
+                    "service": "test service",
+                    "reason": "test error",
+                }
+            )
+        )
+        mock_net_enable.assert_called_once_with("mock_runner", "corosync-qnetd")
+
+
+ at mock.patch("pcs.lib.external.disable_service")
+ at mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ at mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class QdeviceNetDisableTest(QdeviceTestCase):
+    def test_success(self, mock_net_disable):
+        lib.qdevice_disable(self.lib_env, "net")
+        mock_net_disable.assert_called_once_with(
+            "mock_runner",
+            "corosync-qnetd"
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_DISABLE_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+    def test_failed(self, mock_net_disable):
+        mock_net_disable.side_effect = DisableServiceError(
+            "test service",
+            "test error"
+        )
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_disable(self.lib_env, "net"),
+            (
+                severity.ERROR,
+                report_codes.SERVICE_DISABLE_ERROR,
+                {
+                    "service": "test service",
+                    "reason": "test error",
+                }
+            )
+        )
+        mock_net_disable.assert_called_once_with(
+            "mock_runner",
+            "corosync-qnetd"
+        )
+
+
+ at mock.patch("pcs.lib.external.start_service")
+ at mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ at mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class QdeviceNetStartTest(QdeviceTestCase):
+    def test_success(self, mock_net_start):
+        lib.qdevice_start(self.lib_env, "net")
+        mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd")
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_START_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_START_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+    def test_failed(self, mock_net_start):
+        mock_net_start.side_effect = StartServiceError(
+            "test service",
+            "test error"
+        )
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_start(self.lib_env, "net"),
+            (
+                severity.ERROR,
+                report_codes.SERVICE_START_ERROR,
+                {
+                    "service": "test service",
+                    "reason": "test error",
+                }
+            )
+        )
+        mock_net_start.assert_called_once_with("mock_runner", "corosync-qnetd")
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_START_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+
+ at mock.patch("pcs.lib.external.stop_service")
+ at mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ at mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class QdeviceNetStopTest(QdeviceTestCase):
+    def test_success(self, mock_net_stop):
+        lib.qdevice_stop(self.lib_env, "net")
+        mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                ),
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_SUCCESS,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+    def test_failed(self, mock_net_stop):
+        mock_net_stop.side_effect = StopServiceError(
+            "test service",
+            "test error"
+        )
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_stop(self.lib_env, "net"),
+            (
+                severity.ERROR,
+                report_codes.SERVICE_STOP_ERROR,
+                {
+                    "service": "test service",
+                    "reason": "test error",
+                }
+            )
+        )
+        mock_net_stop.assert_called_once_with("mock_runner", "corosync-qnetd")
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_STOP_STARTED,
+                    {
+                        "service": "quorum device",
+                    }
+                )
+            ]
+        )
+
+
+ at mock.patch("pcs.lib.external.kill_services")
+ at mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+ at mock.patch.object(
+    LibraryEnvironment,
+    "cmd_runner",
+    lambda self: "mock_runner"
+)
+class QdeviceNetKillTest(QdeviceTestCase):
+    def test_success(self, mock_net_kill):
+        lib.qdevice_kill(self.lib_env, "net")
+        mock_net_kill.assert_called_once_with(
+            "mock_runner",
+            ["corosync-qnetd"]
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.SERVICE_KILL_SUCCESS,
+                    {
+                        "services": ["quorum device"],
+                    }
+                )
+            ]
+        )
+
+    def test_failed(self, mock_net_kill):
+        mock_net_kill.side_effect = KillServicesError(
+            ["test service"],
+            "test error"
+        )
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_kill(self.lib_env, "net"),
+            (
+                severity.ERROR,
+                report_codes.SERVICE_KILL_ERROR,
+                {
+                    "services": ["test service"],
+                    "reason": "test error",
+                }
+            )
+        )
+        mock_net_kill.assert_called_once_with(
+            "mock_runner",
+            ["corosync-qnetd"]
+        )
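
A note on the decorator stacking used throughout these qdevice tests:
mock.patch decorators are applied bottom-up, so the mock closest to the
class (or method) definition arrives as the first extra argument. That
is why QdeviceNetSetupTest patches start_service, enable_service and
qdevice_setup in that order, yet its methods receive (mock_net_setup,
mock_net_enable, mock_net_start). A minimal, self-contained sketch of
the rule, using the standard library mock rather than the pcs test
helpers:

    from unittest import TestCase, mock

    @mock.patch("os.path.exists")   # outermost decorator -> last argument
    @mock.patch("os.path.isdir")    # innermost decorator -> first argument
    class PatchOrderTest(TestCase):
        def test_order(self, mock_isdir, mock_exists):
            # Arguments arrive in reverse decorator order.
            mock_isdir.return_value = True
            mock_exists.return_value = False
            import os
            self.assertTrue(os.path.isdir("/anywhere"))
            self.assertFalse(os.path.exists("/anywhere"))
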
diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
index 172f895..5725381 100644
--- a/pcs/test/test_lib_commands_quorum.py
+++ b/pcs/test/test_lib_commands_quorum.py
@@ -297,7 +297,7 @@ class AddDeviceTest(TestCase, CmanMixin):
         lib.add_device(
             lib_env,
             "net",
-            {"host": "127.0.0.1"},
+            {"host": "127.0.0.1", "algorithm": "ffsplit"},
             {"timeout": "12345"}
         )
 
@@ -313,6 +313,7 @@ class AddDeviceTest(TestCase, CmanMixin):
         model: net
 
         net {
+            algorithm: ffsplit
             host: 127.0.0.1
         }
     }
@@ -331,7 +332,7 @@ class AddDeviceTest(TestCase, CmanMixin):
             lambda: lib.add_device(
                 lib_env,
                 "net",
-                {"host": "127.0.0.1", },
+                {"host": "127.0.0.1", "algorithm": "ffsplit"},
                 {"bad_option": "bad_value", }
             ),
             (
@@ -358,7 +359,7 @@ class AddDeviceTest(TestCase, CmanMixin):
         lib.add_device(
             lib_env,
             "net",
-            {"host": "127.0.0.1", },
+            {"host": "127.0.0.1", "algorithm": "ffsplit"},
             {"bad_option": "bad_value", },
             force_options=True
         )
@@ -390,6 +391,7 @@ class AddDeviceTest(TestCase, CmanMixin):
         model: net
 
         net {
+            algorithm: ffsplit
             host: 127.0.0.1
         }
     }
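
These hunks track an upstream behaviour change: the net quorum device
model now requires an algorithm option alongside host, so every
add_device call in the tests gains "algorithm": "ffsplit". A minimal
sketch of the validation rule implied by the tests (illustrative names,
not the pcs implementation; the allowed algorithm values are taken from
the facade tests further below):

    REQUIRED_NET_OPTIONS = ("host", "algorithm")
    ALLOWED_ALGORITHMS = ("2nodelms", "ffsplit", "lms")

    def missing_net_options(model_options):
        """Return required net-model options that are absent or empty."""
        return [
            name for name in REQUIRED_NET_OPTIONS
            if not model_options.get(name)
        ]

    # The old call shape now fails validation:
    assert missing_net_options({"host": "127.0.0.1"}) == ["algorithm"]
    assert missing_net_options(
        {"host": "127.0.0.1", "algorithm": "ffsplit"}
    ) == []
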
diff --git a/pcs/test/test_lib_commands_sbd.py b/pcs/test/test_lib_commands_sbd.py
new file mode 100644
index 0000000..9a96757
--- /dev/null
+++ b/pcs/test/test_lib_commands_sbd.py
@@ -0,0 +1,668 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import logging
+
+from unittest import TestCase
+
+from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_report_item_list_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+
+from pcs.common import report_codes
+from pcs.lib.errors import (
+    ReportItemSeverity as Severities,
+    LibraryError,
+    ReportItem,
+)
+from pcs.lib.env import LibraryEnvironment
+from pcs.lib.node import (
+    NodeAddresses,
+    NodeAddressesList,
+)
+from pcs.lib.external import (
+    NodeCommunicator,
+    NodeConnectionException,
+    CommandRunner,
+)
+import pcs.lib.commands.sbd as cmd_sbd
+
+
+class CommandSbdTest(TestCase):
+    def setUp(self):
+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
+        self.mock_log = mock.MagicMock(spec_set=logging.Logger)
+        self.mock_env.logger = self.mock_log
+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        self.mock_env.node_communicator.return_value = self.mock_com
+        self.mock_run = mock.MagicMock(spec_set=CommandRunner)
+        self.mock_env.cmd_runner.return_value = self.mock_run
+        self.mock_rep = MockLibraryReportProcessor()
+        self.mock_env.report_processor = self.mock_rep
+
+        self.node_list = NodeAddressesList(
+            [NodeAddresses("node" + str(i)) for i in range(3)]
+        )
+
+
+class ValidateSbdOptionsTest(TestCase):
+    def setUp(self):
+        self.allowed_sbd_options = sorted([
+            "SBD_DELAY_START", "SBD_STARTMODE", "SBD_WATCHDOG_TIMEOUT"
+        ])
+        self.allowed_sbd_options_str = ", ".join(self.allowed_sbd_options)
+
+    def test_all_ok(self):
+        config = {
+            "SBD_DELAY_START": "yes",
+            "SBD_WATCHDOG_TIMEOUT": "5",
+            "SBD_STARTMODE": "clean"
+        }
+        self.assertEqual([], cmd_sbd._validate_sbd_options(config))
+
+    def test_unknown_options(self):
+        config = {
+            "SBD_DELAY_START": "yes",
+            "SBD_WATCHDOG_TIMEOUT": "5",
+            "SBD_STARTMODE": "clean",
+            "SBD_UNKNOWN": "",
+            "another_unknown_option": "some value"
+        }
+        assert_report_item_list_equal(
+            cmd_sbd._validate_sbd_options(config),
+            [
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "SBD_UNKNOWN",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    report_codes.FORCE_OPTIONS
+                ),
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "another_unknown_option",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    report_codes.FORCE_OPTIONS
+                )
+            ]
+        )
+
+    def test_unknown_options_forced(self):
+        config = {
+            "SBD_DELAY_START": "yes",
+            "SBD_WATCHDOG_TIMEOUT": "5",
+            "SBD_STARTMODE": "clean",
+            "SBD_UNKNOWN": "",
+            "another_unknown_option": "some value"
+        }
+        # just make sure there is no exception raised
+        assert_report_item_list_equal(
+            cmd_sbd._validate_sbd_options(config, allow_unknown_opts=True),
+            [
+                (
+                    Severities.WARNING,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "SBD_UNKNOWN",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    None
+                ),
+                (
+                    Severities.WARNING,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "another_unknown_option",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    None
+                )
+            ]
+        )
+
+
+    def test_unsupported_options(self):
+        config = {
+            "SBD_DELAY_START": "yes",
+            "SBD_WATCHDOG_TIMEOUT": "5",
+            "SBD_STARTMODE": "clean",
+            "SBD_WATCHDOG_DEV": "/dev/watchdog",
+            "SBD_OPTS": "  "
+        }
+
+        assert_report_item_list_equal(
+            cmd_sbd._validate_sbd_options(config),
+            [
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "SBD_WATCHDOG_DEV",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    None
+                ),
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "SBD_OPTS",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    None
+                )
+            ]
+        )
+
+    def test_invalid_and_unsupported_options(self):
+        config = {
+            "SBD_DELAY_START": "yes",
+            "SBD_WATCHDOG_TIMEOUT": "5",
+            "SBD_STARTMODE": "clean",
+            "SBD_WATCHDOG_DEV": "/dev/watchdog",
+            "SBD_UNKNOWN": "",
+            "SBD_OPTS": "  "
+        }
+
+        assert_report_item_list_equal(
+            cmd_sbd._validate_sbd_options(config),
+            [
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "SBD_WATCHDOG_DEV",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    None
+                ),
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "SBD_OPTS",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    None
+                ),
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "SBD_UNKNOWN",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    report_codes.FORCE_OPTIONS
+                )
+            ]
+        )
+
+    def test_invalid_and_unsupported_options_forced(self):
+        config = {
+            "SBD_DELAY_START": "yes",
+            "SBD_WATCHDOG_TIMEOUT": "5",
+            "SBD_STARTMODE": "clean",
+            "SBD_WATCHDOG_DEV": "/dev/watchdog",
+            "SBD_UNKNOWN": "",
+            "SBD_OPTS": "  "
+        }
+
+        assert_report_item_list_equal(
+            cmd_sbd._validate_sbd_options(config, allow_unknown_opts=True),
+            [
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "SBD_WATCHDOG_DEV",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    None
+                ),
+                (
+                    Severities.ERROR,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "SBD_OPTS",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    None
+                ),
+                (
+                    Severities.WARNING,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "SBD_UNKNOWN",
+                        "option_type": None,
+                        "allowed": self.allowed_sbd_options,
+                        "allowed_str": self.allowed_sbd_options_str
+                    },
+                    None
+                )
+            ]
+        )
+
+
+class GetFullWatchdogListTest(TestCase):
+    def setUp(self):
+        self.node_list = NodeAddressesList(
+            [NodeAddresses("node" + str(i)) for i in range(5)]
+        )
+
+    def test_full(self):
+        watchdog_dict = {
+            self.node_list[1].label: "/dev/watchdog1",
+            self.node_list[2].label: "/dev/watchdog2"
+        }
+        expected = {
+            self.node_list[0]: "/dev/default",
+            self.node_list[1]: "/dev/watchdog1",
+            self.node_list[2]: "/dev/watchdog2",
+            self.node_list[3]: "/dev/default",
+            self.node_list[4]: "/dev/default",
+        }
+        self.assertEqual(
+            cmd_sbd._get_full_watchdog_list(
+                self.node_list, "/dev/default", watchdog_dict
+            ),
+            expected
+        )
+
+    def test_unknown_nodes(self):
+        watchdog_dict = {
+            self.node_list[1].label: "/dev/watchdog1",
+            self.node_list[2].label: "/dev/watchdog2",
+            "unknown_node": "/dev/watchdog0",
+            "another_unknown_node": "/dev/watchdog"
+        }
+        assert_raise_library_error(
+            lambda: cmd_sbd._get_full_watchdog_list(
+                self.node_list, "/dev/dog", watchdog_dict
+            ),
+            (
+                Severities.ERROR,
+                report_codes.NODE_NOT_FOUND,
+                {"node": "unknown_node"}
+            ),
+            (
+                Severities.ERROR,
+                report_codes.NODE_NOT_FOUND,
+                {"node": "another_unknown_node"}
+            )
+        )
+
+
+ at mock.patch("pcs.lib.commands.sbd._get_cluster_nodes")
+ at mock.patch("pcs.lib.sbd.check_sbd")
+class GetClusterSbdStatusTest(CommandSbdTest):
+    def test_success(self, mock_check_sbd, mock_get_nodes):
+        def ret_val(communicator, node, empty_str):
+            self.assertEqual(communicator, self.mock_com)
+            self.assertEqual(empty_str, "")
+            if node.label == "node0":
+                return """{
+                    "sbd": {
+                        "installed": true,
+                        "enabled": true,
+                        "running": false
+                    }
+                }"""
+            elif node.label == "node1":
+                return """{
+                    "sbd": {
+                        "installed": false,
+                        "enabled": false,
+                        "running": false
+                    }
+                }"""
+            elif node.label == "node2":
+                return """{
+                    "sbd": {
+                        "installed": true,
+                        "enabled": false,
+                        "running": false
+                    }
+                }"""
+            else:
+                raise AssertionError(
+                    "Unexpected call: node={node}, node.label={label}".format(
+                        node=str(node), label=node.label
+                    )
+                )
+
+        mock_check_sbd.side_effect = ret_val
+        self.mock_env.is_cman_cluster = False
+        mock_get_nodes.return_value = self.node_list
+        expected = [
+            {
+                "node": self.node_list.find_by_label("node0"),
+                "status": {
+                    "installed": True,
+                    "enabled": True,
+                    "running": False
+                }
+            },
+            {
+                "node": self.node_list.find_by_label("node1"),
+                "status": {
+                    "installed": False,
+                    "enabled": False,
+                    "running": False
+                }
+            },
+            {
+                "node": self.node_list.find_by_label("node2"),
+                "status": {
+                    "installed": True,
+                    "enabled": False,
+                    "running": False
+                }
+            }
+        ]
+
+        self.assertEqual(
+            expected, cmd_sbd.get_cluster_sbd_status(self.mock_env)
+        )
+        mock_get_nodes.assert_called_once_with(self.mock_env)
+        self.assertEqual(3, mock_check_sbd.call_count)
+        self.assertEqual(self.mock_log.warning.call_count, 0)
+
+    def test_failures(self, mock_check_sbd, mock_get_nodes):
+        def ret_val(communicator, node, empty_str):
+            self.assertEqual(communicator, self.mock_com)
+            self.assertEqual(empty_str, "")
+            if node.label == "node0":
+                return """{
+                    "not_sbd": {
+                        "installed": true,
+                        "enabled": true,
+                        "running": false
+                    }
+                }"""
+            elif node.label == "node1":
+                raise NodeConnectionException(node.label, "command", "reason")
+            elif node.label == "node2":
+                return "invalid_json"
+            else:
+                raise AssertionError(
+                    "Unexpected call: node={node}, node.label={label}".format(
+                        node=str(node), label=node.label
+                    )
+                )
+
+        mock_check_sbd.side_effect = ret_val
+        self.mock_env.is_cman_cluster = False
+        mock_get_nodes.return_value = self.node_list
+        all_none = {
+            "installed": None,
+            "enabled": None,
+            "running": None
+        }
+        expected = [
+            {
+                "node": self.node_list.find_by_label("node0"),
+                "status": all_none
+            },
+            {
+                "node": self.node_list.find_by_label("node1"),
+                "status": all_none
+            },
+            {
+                "node": self.node_list.find_by_label("node2"),
+                "status": all_none
+            }
+        ]
+
+        self.assertEqual(
+            expected, cmd_sbd.get_cluster_sbd_status(self.mock_env)
+        )
+        mock_get_nodes.assert_called_once_with(self.mock_env)
+        self.assertEqual(3, mock_check_sbd.call_count)
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [
+                (
+                    Severities.WARNING,
+                    report_codes.UNABLE_TO_GET_SBD_STATUS,
+                    {"node": "node0"}
+                ),
+                (
+                    Severities.WARNING,
+                    report_codes.UNABLE_TO_GET_SBD_STATUS,
+                    {"node": "node1"}
+                ),
+                (
+                    Severities.WARNING,
+                    report_codes.UNABLE_TO_GET_SBD_STATUS,
+                    {"node": "node2"}
+                )
+            ]
+        )
+
+    def test_cman_cluster(self, mock_check_sbd, mock_get_nodes):
+        self.mock_env.is_cman_cluster = True
+        assert_raise_library_error(
+            lambda: cmd_sbd.get_cluster_sbd_status(self.mock_env),
+            (
+                Severities.ERROR,
+                report_codes.CMAN_UNSUPPORTED_COMMAND,
+                {}
+            )
+        )
+
+
+ at mock.patch("pcs.lib.commands.sbd._get_cluster_nodes")
+ at mock.patch("pcs.lib.sbd.get_sbd_config")
+class GetClusterSbdConfigTest(CommandSbdTest):
+    def test_success(self, mock_sbd_cfg, mock_get_nodes):
+        this = self
+
+        def ret_val(communicator, node):
+            this.assertEqual(communicator, this.mock_com)
+            if node.label == "node0":
+                return """\
+# comment
+SBD_TEST=true
+ANOTHER_OPT=1
+"""
+            elif node.label == "node1":
+                return """\
+OPTION=   value
+
+"""
+            elif node.label == "node2":
+                return """\
+
+# just comment
+
+"""
+            else:
+                raise AssertionError(
+                    "Unexpected call: node={node}, node.label={label}".format(
+                        node=str(node), label=node.label
+                    )
+                )
+
+        mock_sbd_cfg.side_effect = ret_val
+        self.mock_env.is_cman_cluster = False
+        mock_get_nodes.return_value = self.node_list
+        expected = [
+            {
+                "node": self.node_list.find_by_label("node0"),
+                "config": {
+                    "SBD_TEST": "true",
+                    "ANOTHER_OPT": "1"
+                }
+            },
+            {
+                "node": self.node_list.find_by_label("node1"),
+                "config": {"OPTION": "value"}
+            },
+            {
+                "node": self.node_list.find_by_label("node2"),
+                "config": {}
+            }
+        ]
+
+        self.assertEqual(
+            expected, cmd_sbd.get_cluster_sbd_config(self.mock_env)
+        )
+        mock_get_nodes.assert_called_once_with(self.mock_env)
+        self.assertEqual(3, mock_sbd_cfg.call_count)
+        self.assertEqual(self.mock_log.warning.call_count, 0)
+
+    def test_few_failures(self, mock_sbd_cfg, mock_get_nodes):
+        def ret_val(communicator, node):
+            self.assertEqual(communicator, self.mock_com)
+            if node.label == "node0":
+                return """\
+            # comment
+            SBD_TEST=true
+            ANOTHER_OPT=1
+            """
+            elif node.label == "node1":
+                return """\
+invalid value
+
+            """
+            elif node.label == "node2":
+                raise NodeConnectionException(node.label, "command", "reason")
+            else:
+                raise AssertionError(
+                    "Unexpected call: node={node}, node.label={label}".format(
+                        node=str(node), label=node.label
+                    )
+                )
+
+        mock_sbd_cfg.side_effect = ret_val
+        self.mock_env.is_cman_cluster = False
+        mock_get_nodes.return_value = self.node_list
+        expected = [
+            {
+                "node": self.node_list.find_by_label("node0"),
+                "config": {
+                    "SBD_TEST": "true",
+                    "ANOTHER_OPT": "1"
+                }
+            },
+            {
+                "node": self.node_list.find_by_label("node1"),
+                "config": {}
+            },
+            {
+                "node": self.node_list.find_by_label("node2"),
+                "config": None
+            }
+        ]
+
+        self.assertEqual(
+            expected, cmd_sbd.get_cluster_sbd_config(self.mock_env)
+        )
+        mock_get_nodes.assert_called_once_with(self.mock_env)
+        self.assertEqual(3, mock_sbd_cfg.call_count)
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.WARNING,
+                report_codes.UNABLE_TO_GET_SBD_CONFIG,
+                {"node": "node2"}
+            )]
+        )
+
+
+    def test_cman_cluster(self, mock_sbd_cfg, mock_get_nodes):
+        self.mock_env.is_cman_cluster = True
+        assert_raise_library_error(
+            lambda: cmd_sbd.get_cluster_sbd_config(self.mock_env),
+            (
+                Severities.ERROR,
+                report_codes.CMAN_UNSUPPORTED_COMMAND,
+                {}
+            )
+        )
+
+
+ at mock.patch("pcs.lib.sbd.get_local_sbd_config")
+class GetLocalSbdConfigTest(TestCase):
+    def setUp(self):
+        self.mock_env = mock.MagicMock(spec_set=LibraryEnvironment)
+
+    def test_success(self, mock_config):
+        self.mock_env.is_cman_cluster = False
+        mock_config.return_value = """
+# This file has been generated by pcs.
+SBD_OPTS="-n node1"
+SBD_WATCHDOG_DEV=/dev/watchdog
+SBD_WATCHDOG_TIMEOUT=0
+"""
+
+        self.assertEqual(
+            {
+                "SBD_OPTS": '"-n node1"',
+                "SBD_WATCHDOG_DEV": "/dev/watchdog",
+                "SBD_WATCHDOG_TIMEOUT": "0"
+            },
+            cmd_sbd.get_local_sbd_config(self.mock_env)
+        )
+        self.assertEqual(1, mock_config.call_count)
+
+    def test_cman_cluster(self, mock_config):
+        self.mock_env.is_cman_cluster = True
+        assert_raise_library_error(
+            lambda: cmd_sbd.get_local_sbd_config(self.mock_env),
+            (
+                Severities.ERROR,
+                report_codes.CMAN_UNSUPPORTED_COMMAND,
+                {}
+            )
+        )
+        self.assertEqual(0, mock_config.call_count)
+
+    def test_file_error(self, mock_config):
+        self.mock_env.is_cman_cluster = False
+        mock_config.side_effect = LibraryError(ReportItem.error(
+            report_codes.UNABLE_TO_GET_SBD_CONFIG,
+            "message"
+        ))
+        assert_raise_library_error(
+            lambda: cmd_sbd.get_local_sbd_config(self.mock_env),
+            (
+                Severities.ERROR,
+                report_codes.UNABLE_TO_GET_SBD_CONFIG,
+                {}
+            )
+        )
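
The GetFullWatchdogListTest cases above boil down to a merge of
per-node overrides over a default watchdog, keyed by node label, with
unknown labels rejected. A simplified sketch of that behaviour (plain
strings stand in for NodeAddresses, and a KeyError stands in for the
NODE_NOT_FOUND report):

    def full_watchdog_list(node_labels, default_watchdog, watchdog_dict):
        """Map each node to its watchdog, falling back to the default."""
        unknown = set(watchdog_dict) - set(node_labels)
        if unknown:
            raise KeyError(
                "unknown nodes: " + ", ".join(sorted(unknown))
            )
        return {
            label: watchdog_dict.get(label, default_watchdog)
            for label in node_labels
        }

    nodes = ["node0", "node1", "node2"]
    assert full_watchdog_list(
        nodes, "/dev/default", {"node1": "/dev/watchdog1"}
    ) == {
        "node0": "/dev/default",
        "node1": "/dev/watchdog1",
        "node2": "/dev/default",
    }
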
diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py
index 3c18d65..5700016 100644
--- a/pcs/test/test_lib_corosync_config_facade.py
+++ b/pcs/test/test_lib_corosync_config_facade.py
@@ -733,6 +733,7 @@ quorum {
 
         net {
             host: 127.0.0.1
+            algorithm: ffsplit
         }
     }
 }
@@ -743,7 +744,7 @@ quorum {
             lambda: facade.add_quorum_device(
                 reporter,
                 "net",
-                {"host": "127.0.0.1"},
+                {"host": "127.0.0.1", "algorithm": "ffsplit"},
                 {}
             ),
             (
@@ -762,7 +763,7 @@ quorum {
         facade.add_quorum_device(
             reporter,
             "net",
-            {"host": "127.0.0.1"},
+            {"host": "127.0.0.1", "algorithm": "ffsplit"},
             {}
         )
         ac(
@@ -775,6 +776,7 @@ quorum {
         model: net
 
         net {
+            algorithm: ffsplit
             host: 127.0.0.1
         }
     }"""
@@ -968,7 +970,7 @@ quorum {
         facade.add_quorum_device(
             reporter,
             "net",
-            {"host": "127.0.0.1"},
+            {"host": "127.0.0.1", "algorithm": "ffsplit"},
             {}
         )
         ac(
@@ -982,6 +984,7 @@ quorum {
         model: net
 
         net {
+            algorithm: ffsplit
             host: 127.0.0.1
         }
     }
@@ -1013,7 +1016,7 @@ quorum {
         facade.add_quorum_device(
             reporter,
             "net",
-            {"host": "127.0.0.1"},
+            {"host": "127.0.0.1", "algorithm": "ffsplit"},
             {}
         )
         ac(
@@ -1029,6 +1032,7 @@ quorum {
         model: net
 
         net {
+            algorithm: ffsplit
             host: 127.0.0.1
         }
     }
@@ -1103,6 +1107,11 @@ quorum {
                 severity.ERROR,
                 report_codes.REQUIRED_OPTION_IS_MISSING,
                 {"option_name": "host"}
+            ),
+            (
+                severity.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "algorithm"}
             )
         )
         self.assertFalse(facade.need_stopped_cluster)
@@ -1260,6 +1269,11 @@ quorum {
                 severity.ERROR,
                 report_codes.REQUIRED_OPTION_IS_MISSING,
                 {"option_name": "host"}
+            ),
+            (
+                severity.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "algorithm"}
             )
         )
         self.assertFalse(facade.need_stopped_cluster)
@@ -1271,13 +1285,18 @@ quorum {
         facade = lib.ConfigFacade.from_string(config)
         assert_raise_library_error(
             lambda: facade.add_quorum_device(
-                reporter, "net", {"host": ""}, {},
+                reporter, "net", {"host": "", "algorithm": ""}, {},
                 force_model=True, force_options=True
             ),
             (
                 severity.ERROR,
                 report_codes.REQUIRED_OPTION_IS_MISSING,
                 {"option_name": "host"}
+            ),
+            (
+                severity.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "algorithm"}
             )
         )
         self.assertFalse(facade.need_stopped_cluster)
@@ -1508,7 +1527,7 @@ quorum {
         )
         self.assertEqual([], reporter.report_item_list)
 
-    def test_success_net_doesnt_require_host(self):
+    def test_success_net_doesnt_require_host_and_algorithm(self):
         config = self.fixture_add_device(
             open(rc("corosync-3nodes.conf")).read()
         )
@@ -1525,24 +1544,43 @@ quorum {
         )
         self.assertEqual([], reporter.report_item_list)
 
-    def test_net_host_cannot_be_removed(self):
+    def test_net_required_options_cannot_be_removed(self):
         config = self.fixture_add_device(
             open(rc("corosync-3nodes.conf")).read()
         )
         reporter = MockLibraryReportProcessor()
         facade = lib.ConfigFacade.from_string(config)
         assert_raise_library_error(
-            lambda: facade.update_quorum_device(reporter, {"host": ""}, {}),
+            lambda: facade.update_quorum_device(
+                reporter,
+                {"host": "", "algorithm": ""},
+                {}
+            ),
             (
                 severity.ERROR,
                 report_codes.REQUIRED_OPTION_IS_MISSING,
                 {"option_name": "host"},
+            ),
+            (
+                severity.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "algorithm"}
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "algorithm",
+                    "option_value": "",
+                    "allowed_values": ("2nodelms", "ffsplit", "lms")
+                },
+                report_codes.FORCE_OPTIONS
             )
         )
         self.assertFalse(facade.need_stopped_cluster)
         ac(config, facade.config.export())
 
-    def test_net_host_cannot_be_removed_forced(self):
+    def test_net_required_options_cannot_be_removed_forced(self):
         config = self.fixture_add_device(
             open(rc("corosync-3nodes.conf")).read()
         )
@@ -1550,12 +1588,20 @@ quorum {
         facade = lib.ConfigFacade.from_string(config)
         assert_raise_library_error(
             lambda: facade.update_quorum_device(
-                reporter, {"host": ""}, {}, force_options=True
+                reporter,
+                {"host": "", "algorithm": ""},
+                {},
+                force_options=True
             ),
             (
                 severity.ERROR,
                 report_codes.REQUIRED_OPTION_IS_MISSING,
                 {"option_name": "host"},
+            ),
+            (
+                severity.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "algorithm"}
             )
         )
         self.assertFalse(facade.need_stopped_cluster)
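
For reference, the quorum device section these facade tests expect in
the exported corosync.conf, reassembled from the expected outputs
quoted in the hunks above (the enclosing quorum and device headers are
implied by the surrounding context, not shown in full here):

    quorum {
        device {
            model: net

            net {
                algorithm: ffsplit
                host: 127.0.0.1
            }
        }
    }
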
diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py
new file mode 100644
index 0000000..38bc9c8
--- /dev/null
+++ b/pcs/test/test_lib_corosync_qdevice_net.py
@@ -0,0 +1,91 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.assertions import assert_raise_library_error
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severity
+from pcs.lib.external import CommandRunner
+
+import pcs.lib.corosync.qdevice_net as lib
+
+
+_qnetd_cert_dir = "/etc/corosync/qdevice/net/qnetd/nssdb"
+_qnetd_tool = "/usr/sbin/corosync-qnetd-certutil"
+
+ at mock.patch("pcs.lib.corosync.qdevice_net.external.is_dir_nonempty")
+class QdeviceSetupTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=CommandRunner)
+
+    def test_success(self, mock_is_dir_nonempty):
+        mock_is_dir_nonempty.return_value = False
+        self.mock_runner.run.return_value = ("initialized", 0)
+
+        lib.qdevice_setup(self.mock_runner)
+
+        mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir)
+        self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-i"])
+
+    def test_cert_db_exists(self, mock_is_dir_nonempty):
+        mock_is_dir_nonempty.return_value = True
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_setup(self.mock_runner),
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_ALREADY_INITIALIZED,
+                {"model": "net"}
+            )
+        )
+
+        mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir)
+        self.mock_runner.assert_not_called()
+
+    def test_init_tool_fail(self, mock_is_dir_nonempty):
+        mock_is_dir_nonempty.return_value = False
+        self.mock_runner.run.return_value = ("test error", 1)
+
+        assert_raise_library_error(
+            lambda: lib.qdevice_setup(self.mock_runner),
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_INITIALIZATION_ERROR,
+                {
+                    "model": "net",
+                    "reason": "test error",
+                }
+            )
+        )
+
+        mock_is_dir_nonempty.assert_called_once_with(_qnetd_cert_dir)
+        self.mock_runner.run.assert_called_once_with([_qnetd_tool, "-i"])
+
+
+ at mock.patch("pcs.lib.corosync.qdevice_net.shutil.rmtree")
+class QdeviceDestroyTest(TestCase):
+    def test_success(self, mock_rmtree):
+        lib.qdevice_destroy()
+        mock_rmtree.assert_called_once_with(_qnetd_cert_dir)
+
+    def test_cert_dir_rm_error(self, mock_rmtree):
+        mock_rmtree.side_effect = EnvironmentError("test errno", "test message")
+        assert_raise_library_error(
+            lib.qdevice_destroy,
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_DESTROY_ERROR,
+                {
+                    "model": "net",
+                    "reason": "test message",
+                }
+            )
+        )
+        mock_rmtree.assert_called_once_with(_qnetd_cert_dir)
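
The setup/destroy pair exercised above follows a simple guard pattern:
refuse to initialize over a non-empty certificate database, and remove
the whole database directory on destroy. A stripped-down sketch of that
flow (paths and the init-tool invocation taken from the tests; error
handling simplified, not the pcs implementation):

    import os
    import shutil

    CERT_DB = "/etc/corosync/qdevice/net/qnetd/nssdb"

    def is_dir_nonempty(path):
        return os.path.isdir(path) and bool(os.listdir(path))

    def qdevice_setup(run):
        # run is a callable executing a command and returning
        # (output, retval), like CommandRunner.run in the tests.
        if is_dir_nonempty(CERT_DB):
            raise RuntimeError("qdevice already initialized")
        output, retval = run(["/usr/sbin/corosync-qnetd-certutil", "-i"])
        if retval != 0:
            raise RuntimeError("initialization failed: " + output)

    def qdevice_destroy():
        shutil.rmtree(CERT_DB)
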
diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
index 0e5f8a5..c08b059 100644
--- a/pcs/test/test_lib_external.py
+++ b/pcs/test/test_lib_external.py
@@ -635,6 +635,13 @@ class NodeCommunicatorTest(TestCase):
             lib.NodeUnsupportedCommandException
         )
 
+    def test_command_unsuccessful(self, mock_get_opener):
+        self.base_test_http_error(
+            mock_get_opener,
+            400,
+            lib.NodeCommandUnsuccessfulException
+        )
+
     def test_other_error(self, mock_get_opener):
         self.base_test_http_error(
             mock_get_opener,
@@ -692,6 +699,26 @@ class NodeCommunicatorTest(TestCase):
 
 
 class NodeCommunicatorExceptionTransformTest(TestCase):
+    def test_transform_error_400(self):
+        node = "test_node"
+        command = "test_command"
+        reason = "test_reason"
+
+        assert_report_item_equal(
+            lib.node_communicator_exception_to_report_item(
+                lib.NodeCommandUnsuccessfulException(node, command, reason)
+            ),
+            (
+                severity.ERROR,
+                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+                {
+                    "node": node,
+                    "command": command,
+                    "reason": reason,
+                }
+            )
+        )
+
     def test_transform_error_401(self):
         node = "test_node"
         command = "test_command"
@@ -858,3 +885,464 @@ Copyright (c) 2006-2009 Red Hat, Inc.
 """,
             1
         )
+
+
+ at mock.patch("pcs.lib.external.is_systemctl")
+class DisableServiceTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
+        self.service = "service_name"
+
+    def test_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", 0)
+        lib.disable_service(self.mock_runner, self.service)
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "disable", self.service + ".service"]
+        )
+
+    def test_systemctl_failed(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", 1)
+        self.assertRaises(
+            lib.DisableServiceError,
+            lambda: lib.disable_service(self.mock_runner, self.service)
+        )
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "disable", self.service + ".service"]
+        )
+
+    @mock.patch("pcs.lib.external.is_service_installed")
+    def test_not_systemctl(self, mock_is_installed, mock_systemctl):
+        mock_is_installed.return_value = True
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 0)
+        lib.disable_service(self.mock_runner, self.service)
+        self.mock_runner.run.assert_called_once_with(
+            ["chkconfig", self.service, "off"]
+        )
+
+    @mock.patch("pcs.lib.external.is_service_installed")
+    def test_not_systemctl_failed(self, mock_is_installed, mock_systemctl):
+        mock_is_installed.return_value = True
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 1)
+        self.assertRaises(
+            lib.DisableServiceError,
+            lambda: lib.disable_service(self.mock_runner, self.service)
+        )
+        self.mock_runner.run.assert_called_once_with(
+            ["chkconfig", self.service, "off"]
+        )
+
+    @mock.patch("pcs.lib.external.is_service_installed")
+    def test_not_systemctl_not_installed(
+            self, mock_is_installed, mock_systemctl
+    ):
+        mock_is_installed.return_value = False
+        mock_systemctl.return_value = False
+        lib.disable_service(self.mock_runner, self.service)
+        self.assertEqual(self.mock_runner.run.call_count, 0)
+
+
+ at mock.patch("pcs.lib.external.is_systemctl")
+class EnableServiceTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
+        self.service = "service_name"
+
+    def test_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", 0)
+        lib.enable_service(self.mock_runner, self.service)
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "enable", self.service + ".service"]
+        )
+
+    def test_systemctl_failed(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", 1)
+        self.assertRaises(
+            lib.EnableServiceError,
+            lambda: lib.enable_service(self.mock_runner, self.service)
+        )
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "enable", self.service + ".service"]
+        )
+
+    def test_not_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 0)
+        lib.enable_service(self.mock_runner, self.service)
+        self.mock_runner.run.assert_called_once_with(
+            ["chkconfig", self.service, "on"]
+        )
+
+    def test_not_systemctl_failed(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 1)
+        self.assertRaises(
+            lib.EnableServiceError,
+            lambda: lib.enable_service(self.mock_runner, self.service)
+        )
+        self.mock_runner.run.assert_called_once_with(
+            ["chkconfig", self.service, "on"]
+        )
+
+
+ at mock.patch("pcs.lib.external.is_systemctl")
+class StartServiceTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
+        self.service = "service_name"
+
+    def test_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", 0)
+        lib.start_service(self.mock_runner, self.service)
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "start", self.service + ".service"]
+        )
+
+    def test_systemctl_failed(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", 1)
+        self.assertRaises(
+            lib.StartServiceError,
+            lambda: lib.start_service(self.mock_runner, self.service)
+        )
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "start", self.service + ".service"]
+        )
+
+    def test_not_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 0)
+        lib.start_service(self.mock_runner, self.service)
+        self.mock_runner.run.assert_called_once_with(
+            ["service", self.service, "start"]
+        )
+
+    def test_not_systemctl_failed(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 1)
+        self.assertRaises(
+            lib.StartServiceError,
+            lambda: lib.start_service(self.mock_runner, self.service)
+        )
+        self.mock_runner.run.assert_called_once_with(
+            ["service", self.service, "start"]
+        )
+
+
+ at mock.patch("pcs.lib.external.is_systemctl")
+class StopServiceTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
+        self.service = "service_name"
+
+    def test_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", 0)
+        lib.stop_service(self.mock_runner, self.service)
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "stop", self.service + ".service"]
+        )
+
+    def test_systemctl_failed(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", 1)
+        self.assertRaises(
+            lib.StopServiceError,
+            lambda: lib.stop_service(self.mock_runner, self.service)
+        )
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "stop", self.service + ".service"]
+        )
+
+    def test_not_systemctl(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 0)
+        lib.stop_service(self.mock_runner, self.service)
+        self.mock_runner.run.assert_called_once_with(
+            ["service", self.service, "stop"]
+        )
+
+    def test_not_systemctl_failed(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 1)
+        self.assertRaises(
+            lib.StopServiceError,
+            lambda: lib.stop_service(self.mock_runner, self.service)
+        )
+        self.mock_runner.run.assert_called_once_with(
+            ["service", self.service, "stop"]
+        )
+
+
+class KillServicesTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
+        self.services = ["service1", "service2"]
+
+    def test_success(self):
+        self.mock_runner.run.return_value = ("", 0)
+        lib.kill_services(self.mock_runner, self.services)
+        self.mock_runner.run.assert_called_once_with(
+            ["killall", "--quiet", "--signal", "9", "--"] + self.services
+        )
+
+    def test_failed(self):
+        self.mock_runner.run.return_value = ("error", 1)
+        self.assertRaises(
+            lib.KillServicesError,
+            lambda: lib.kill_services(self.mock_runner, self.services)
+        )
+        self.mock_runner.run.assert_called_once_with(
+            ["killall", "--quiet", "--signal", "9", "--"] + self.services
+        )
+
+    def test_service_not_running(self):
+        self.mock_runner.run.return_value = ("", 1)
+        lib.kill_services(self.mock_runner, self.services)
+        self.mock_runner.run.assert_called_once_with(
+            ["killall", "--quiet", "--signal", "9", "--"] + self.services
+        )
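+
+# Note on the three cases above: killall exits non-zero both on real
+# failures and when no matching process exists. The tests encode the
+# assumed disambiguation: a non-zero exit with output is raised as
+# KillServicesError, while a non-zero exit with empty output just means
+# no such process was running and is accepted silently.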
+
+
+ at mock.patch("os.listdir")
+ at mock.patch("os.path.isdir")
+ at mock.patch("os.path.exists")
+class IsDirNonemptyTest(TestCase):
+    def test_path_does_not_exist(self, mock_exists, mock_isdir, mock_listdir):
+        mock_exists.return_value = False
+        self.assertFalse(lib.is_dir_nonempty("path"))
+        mock_isdir.assert_not_called()
+        mock_listdir.assert_not_called()
+
+    def test_path_is_not_dir(self, mock_exists, mock_isdir, mock_listdir):
+        mock_exists.return_value = True
+        mock_isdir.return_value = False
+        self.assertTrue(lib.is_dir_nonempty("path"))
+        mock_listdir.assert_not_called()
+
+    def test_dir_is_empty(self, mock_exists, mock_isdir, mock_listdir):
+        mock_exists.return_value = True
+        mock_isdir.return_value = True
+        mock_listdir.return_value = []
+        self.assertFalse(lib.is_dir_nonempty("path"))
+
+    def test_dir_is_not_empty(self, mock_exists, mock_isdir, mock_listdir):
+        mock_exists.return_value = True
+        mock_isdir.return_value = True
+        mock_listdir.return_value = ["a_file"]
+        self.assertTrue(lib.is_dir_nonempty("path"))
+
+
+ at mock.patch("pcs.lib.external.is_systemctl")
+class IsServiceEnabledTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
+        self.service = "service_name"
+
+    def test_systemctl_enabled(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("enabled\n", 0)
+        self.assertTrue(lib.is_service_enabled(self.mock_runner, self.service))
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "is-enabled", self.service + ".service"]
+        )
+
+    def test_systemctl_disabled(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("disabled\n", 2)
+        self.assertFalse(lib.is_service_enabled(self.mock_runner, self.service))
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "is-enabled", self.service + ".service"]
+        )
+
+    def test_not_systemctl_enabled(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 0)
+        self.assertTrue(lib.is_service_enabled(self.mock_runner, self.service))
+        self.mock_runner.run.assert_called_once_with(
+            ["chkconfig", self.service]
+        )
+
+    def test_not_systemctl_disabled(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 3)
+        self.assertFalse(lib.is_service_enabled(self.mock_runner, self.service))
+        self.mock_runner.run.assert_called_once_with(
+            ["chkconfig", self.service]
+        )
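+
+# These cases lean on the exit-code conventions of the underlying tools:
+# "systemctl is-enabled" and a bare "chkconfig <service>" query both
+# exit 0 when the service is enabled and non-zero otherwise, so the
+# helper presumably only checks retval == 0 and ignores the output.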
+
+
+ at mock.patch("pcs.lib.external.is_systemctl")
+class IsServiceRunningTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
+        self.service = "service_name"
+
+    def test_systemctl_running(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", 0)
+        self.assertTrue(lib.is_service_running(self.mock_runner, self.service))
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "is-active", self.service + ".service"]
+        )
+
+    def test_systemctl_not_running(self, mock_systemctl):
+        mock_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", 2)
+        self.assertFalse(lib.is_service_running(self.mock_runner, self.service))
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "is-active", self.service + ".service"]
+        )
+
+    def test_not_systemctl_running(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 0)
+        self.assertTrue(lib.is_service_running(self.mock_runner, self.service))
+        self.mock_runner.run.assert_called_once_with(
+            ["service", self.service, "status"]
+        )
+
+    def test_not_systemctl_not_running(self, mock_systemctl):
+        mock_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 3)
+        self.assertFalse(lib.is_service_running(self.mock_runner, self.service))
+        self.mock_runner.run.assert_called_once_with(
+            ["service", self.service, "status"]
+        )
+
+
+ at mock.patch("pcs.lib.external.is_systemctl")
+ at mock.patch("pcs.lib.external.get_systemd_services")
+ at mock.patch("pcs.lib.external.get_non_systemd_services")
+class IsServiceInstalledTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
+
+    def test_installed_systemd(
+            self, mock_non_systemd, mock_systemd, mock_is_systemctl
+    ):
+        mock_is_systemctl.return_value = True
+        mock_systemd.return_value = ["service1", "service2"]
+        mock_non_systemd.return_value = []
+        self.assertTrue(lib.is_service_installed(self.mock_runner, "service2"))
+        self.assertEqual(mock_is_systemctl.call_count, 1)
+        mock_systemd.assert_called_once_with(self.mock_runner)
+        self.assertEqual(mock_non_systemd.call_count, 0)
+
+    def test_not_installed_systemd(
+            self, mock_non_systemd, mock_systemd, mock_is_systemctl
+    ):
+        mock_is_systemctl.return_value = True
+        mock_systemd.return_value = ["service1", "service2"]
+        mock_non_systemd.return_value = []
+        self.assertFalse(lib.is_service_installed(self.mock_runner, "service3"))
+        self.assertEqual(mock_is_systemctl.call_count, 1)
+        mock_systemd.assert_called_once_with(self.mock_runner)
+        self.assertEqual(mock_non_systemd.call_count, 0)
+
+    def test_installed_not_systemd(
+            self, mock_non_systemd, mock_systemd, mock_is_systemctl
+    ):
+        mock_is_systemctl.return_value = False
+        mock_systemd.return_value = []
+        mock_non_systemd.return_value = ["service1", "service2"]
+        self.assertTrue(lib.is_service_installed(self.mock_runner, "service2"))
+        self.assertEqual(mock_is_systemctl.call_count, 1)
+        mock_non_systemd.assert_called_once_with(self.mock_runner)
+        self.assertEqual(mock_systemd.call_count, 0)
+
+    def test_not_installed_not_systemd(
+            self, mock_non_systemd, mock_systemd, mock_is_systemctl
+    ):
+        mock_is_systemctl.return_value = False
+        mock_systemd.return_value = []
+        mock_non_systemd.return_value = ["service1", "service2"]
+        self.assertFalse(lib.is_service_installed(self.mock_runner, "service3"))
+        self.assertEqual(mock_is_systemctl.call_count, 1)
+        mock_non_systemd.assert_called_once_with(self.mock_runner)
+        self.assertEqual(mock_systemd.call_count, 0)
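+
+# A plausible reading of is_service_installed consistent with the four
+# tests above (hypothetical sketch):
+#
+#     def is_service_installed(runner, service):
+#         if is_systemctl():
+#             return service in get_systemd_services(runner)
+#         return service in get_non_systemd_services(runner)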
+
+
+ at mock.patch("pcs.lib.external.is_systemctl")
+class GetSystemdServicesTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
+
+    def test_success(self, mock_is_systemctl):
+        mock_is_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("""\
+pcsd.service                                disabled
+sbd.service                                 enabled
+pacemaker.service                           enabled
+
+3 unit files listed.
+""", 0)
+        self.assertEqual(
+            lib.get_systemd_services(self.mock_runner),
+            ["pcsd", "sbd", "pacemaker"]
+        )
+        self.assertEqual(mock_is_systemctl.call_count, 1)
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "list-unit-files", "--full"]
+        )
+
+    def test_failed(self, mock_is_systemctl):
+        mock_is_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("failed", 1)
+        self.assertEqual(lib.get_systemd_services(self.mock_runner), [])
+        self.assertEqual(mock_is_systemctl.call_count, 1)
+        self.mock_runner.run.assert_called_once_with(
+            ["systemctl", "list-unit-files", "--full"]
+        )
+
+    def test_not_systemd(self, mock_is_systemctl):
+        mock_is_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("", 0)
+        self.assertEqual(lib.get_systemd_services(self.mock_runner), [])
+        self.assertEqual(mock_is_systemctl.call_count, 1)
+        self.assertEqual(self.mock_runner.run.call_count, 0)
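+
+# Parsing contract assumed by test_success: take the first column of
+# each "systemctl list-unit-files --full" line, keep entries ending in
+# ".service" (with the suffix stripped), and skip the blank line and
+# the trailing "3 unit files listed." summary.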
+
+
+ at mock.patch("pcs.lib.external.is_systemctl")
+class GetNonSystemdServicesTest(TestCase):
+    def setUp(self):
+        self.mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
+
+    def test_success(self, mock_is_systemctl):
+        mock_is_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("""\
+pcsd           	0:off	1:off	2:on	3:on	4:on	5:on	6:off
+sbd            	0:off	1:on	2:on	3:on	4:on	5:on	6:off
+pacemaker      	0:off	1:off	2:off	3:off	4:off	5:off	6:off
+""", 0)
+        self.assertEqual(
+            lib.get_non_systemd_services(self.mock_runner),
+            ["pcsd", "sbd", "pacemaker"]
+        )
+        self.assertEqual(mock_is_systemctl.call_count, 1)
+        self.mock_runner.run.assert_called_once_with(
+            ["chkconfig"], ignore_stderr=True
+        )
+
+    def test_failed(self, mock_is_systemctl):
+        mock_is_systemctl.return_value = False
+        self.mock_runner.run.return_value = ("failed", 1)
+        self.assertEqual(lib.get_non_systemd_services(self.mock_runner), [])
+        self.assertEqual(mock_is_systemctl.call_count, 1)
+        self.mock_runner.run.assert_called_once_with(
+            ["chkconfig"], ignore_stderr=True
+        )
+
+    def test_systemd(self, mock_is_systemctl):
+        mock_is_systemctl.return_value = True
+        self.mock_runner.run.return_value = ("", 0)
+        self.assertEqual(lib.get_non_systemd_services(self.mock_runner), [])
+        self.assertEqual(mock_is_systemctl.call_count, 1)
+        self.assertEqual(self.mock_runner.run.call_count, 0)
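+
+# Analogously, get_non_systemd_services is expected to return the first
+# whitespace-separated column of each chkconfig line, ignoring the
+# per-runlevel on/off flags that follow.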
diff --git a/pcs/test/test_lib_node.py b/pcs/test/test_lib_node.py
index 19e5a3a..6c841d3 100644
--- a/pcs/test/test_lib_node.py
+++ b/pcs/test/test_lib_node.py
@@ -29,6 +29,44 @@ class NodeAddressesTest(TestCase):
         self.assertEqual(ring0, node.label)
         self.assertEqual(None, node.id)
 
+    def test_hash(self):
+        node0 = lib.NodeAddresses("node0")
+        another_node0 = lib.NodeAddresses("node0")
+        node1 = lib.NodeAddresses("node1")
+        self.assertEqual(hash(node0), hash(another_node0))
+        self.assertEqual(hash(node1), hash(node1))
+        self.assertNotEqual(hash(node0), hash(node1))
+
+    def test_equal(self):
+        node0 = lib.NodeAddresses("node0")
+        another_node0 = lib.NodeAddresses("node0")
+        node1 = lib.NodeAddresses("node1")
+        self.assertTrue(node0 == another_node0)
+        self.assertTrue(another_node0 == node0)
+        self.assertTrue(node1 == node1)
+        self.assertFalse(node1 == node0)
+        self.assertFalse(node0 == node1)
+
+    def test_not_equal(self):
+        node0 = lib.NodeAddresses("node0")
+        another_node0 = lib.NodeAddresses("node0")
+        node1 = lib.NodeAddresses("node1")
+        self.assertFalse(node0 != another_node0)
+        self.assertFalse(another_node0 != node0)
+        self.assertFalse(node1 != node1)
+        self.assertTrue(node0 != node1)
+        self.assertTrue(node1 != node0)
+
+    def test_less_than(self):
+        node0 = lib.NodeAddresses("node0")
+        another_node0 = lib.NodeAddresses("node0")
+        node1 = lib.NodeAddresses("node1")
+        self.assertFalse(node0 < another_node0)
+        self.assertFalse(another_node0 < node0)
+        self.assertFalse(node1 < node1)
+        self.assertTrue(node0 < node1)
+        self.assertFalse(node1 < node1)
+
 
 class NodeAddressesListTest(TestCase):
     def test_empty(self):
@@ -80,3 +118,13 @@ class NodeAddressesListTest(TestCase):
         self.assertEqual([node1, node2], list(nodes))
         self.assertEqual(node1, nodes[0])
         self.assertEqual(node2, nodes[1])
+
+    def test_find_by_label(self):
+        node0 = lib.NodeAddresses("node0")
+        node1 = lib.NodeAddresses("node1")
+        node_list = lib.NodeAddressesList([node0, node1])
+        self.assertEqual(node1, node_list.find_by_label("node1"))
+        self.assertEqual(node0, node_list.find_by_label("node0"))
+        self.assertRaises(
+            lib.NodeNotFound, lambda: node_list.find_by_label("node2")
+        )
diff --git a/pcs/test/test_lib_nodes_task.py b/pcs/test/test_lib_nodes_task.py
index cf42069..6af47d7 100644
--- a/pcs/test/test_lib_nodes_task.py
+++ b/pcs/test/test_lib_nodes_task.py
@@ -450,3 +450,13 @@ class CheckCorosyncOfflineTest(TestCase):
                 )
             ]
         )
+
+
+class NodeCheckAuthTest(TestCase):
+    def test_success(self):
+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        node = NodeAddresses("node1")
+        lib.node_check_auth(mock_communicator, node)
+        mock_communicator.call_node.assert_called_once_with(
+            node, "remote/check_auth", "check_auth_only=1"
+        )
diff --git a/pcs/test/test_lib_resource_agent.py b/pcs/test/test_lib_resource_agent.py
index 707df55..5704184 100644
--- a/pcs/test/test_lib_resource_agent.py
+++ b/pcs/test/test_lib_resource_agent.py
@@ -11,7 +11,7 @@ import os.path
 from lxml import etree
 
 from pcs.test.tools.assertions import (
-    assert_raise_library_error,
+    ExtendedAssertionsMixin,
     assert_xml_equal,
 )
 from pcs.test.tools.pcs_mock import mock
@@ -19,13 +19,11 @@ from pcs.test.tools.xml import XmlManipulation as XmlMan
 
 
 from pcs import settings
-from pcs.common import report_codes
 from pcs.lib import resource_agent as lib_ra
-from pcs.lib.errors import ReportItemSeverity as Severities
 from pcs.lib.external import CommandRunner
 
 
-class LibraryResourceTest(TestCase):
+class LibraryResourceTest(TestCase, ExtendedAssertionsMixin):
     pass
 
 
@@ -68,14 +66,9 @@ class GetParameterTest(LibraryResourceTest):
 
     def test_no_name(self):
         xml = '<parameter />'
-        assert_raise_library_error(
-            lambda: lib_ra._get_parameter(etree.XML(xml)),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_METADATA_FORMAT,
-                {},
-                True
-            )
+        self.assertRaises(
+            lib_ra.InvalidMetadataFormat,
+            lambda: lib_ra._get_parameter(etree.XML(xml))
         )
 
     def test_invalid_element(self):
@@ -88,14 +81,9 @@ class GetParameterTest(LibraryResourceTest):
                 <content type="test_type" default="default_value" />
             </param>
         """
-        assert_raise_library_error(
-            lambda: lib_ra._get_parameter(etree.XML(xml)),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_METADATA_FORMAT,
-                {},
-                True
-            )
+        self.assertRaises(
+            lib_ra.InvalidMetadataFormat,
+            lambda: lib_ra._get_parameter(etree.XML(xml))
         )
 
 
@@ -161,14 +149,9 @@ class GetAgentParametersTest(LibraryResourceTest):
                 </parameters>
             </resource-agent>
         """
-        assert_raise_library_error(
-            lambda: lib_ra._get_agent_parameters(etree.XML(xml)),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_METADATA_FORMAT,
-                {},
-                True
-            )
+        self.assertRaises(
+            lib_ra.InvalidMetadataFormat,
+            lambda: lib_ra._get_agent_parameters(etree.XML(xml))
         )
 
 
@@ -178,15 +161,10 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
         mock_obj.return_value = True
         agent_name = "agent"
-
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.AgentNotFound,
             lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_RESOURCE_NAME,
-                {"agent_name": agent_name},
-                True
-            )
+            {"agent": agent_name}
         )
 
         mock_runner.run.assert_not_called()
@@ -196,15 +174,10 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
         mock_obj.return_value = True
         agent_name = "fence_agent/../fence"
-
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.AgentNotFound,
             lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_RESOURCE_NAME,
-                {"agent_name": agent_name},
-                True
-            )
+            {"agent": agent_name}
         )
 
         mock_runner.run.assert_not_called()
@@ -215,33 +188,24 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
         mock_obj.return_value = False
         agent_name = "fence_agent"
 
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.AgentNotFound,
             lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_RESOURCE_NAME,
-                {"agent_name": agent_name},
-                True
-            )
+            {"agent": agent_name}
         )
-
         mock_runner.run.assert_not_called()
 
     @mock.patch("pcs.lib.resource_agent.is_path_runnable")
     def test_execution_failed(self, mock_is_runnable):
         mock_is_runnable.return_value = True
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("", 1)
+        mock_runner.run.return_value = ("error", 1)
         agent_name = "fence_ipmi"
 
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.UnableToGetAgentMetadata,
             lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
-            (
-                Severities.ERROR,
-                report_codes.UNABLE_TO_GET_AGENT_METADATA,
-                {"agent_name": agent_name},
-                True
-            )
+            {"agent": agent_name}
         )
 
         script_path = os.path.join(settings.fence_agent_binaries, agent_name)
@@ -255,15 +219,10 @@ class GetFenceAgentMetadataTest(LibraryResourceTest):
         mock_runner.run.return_value = ("not xml", 0)
         mock_is_runnable.return_value = True
         agent_name = "fence_ipmi"
-
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.UnableToGetAgentMetadata,
             lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
-            (
-                Severities.ERROR,
-                report_codes.UNABLE_TO_GET_AGENT_METADATA,
-                {"agent_name": agent_name},
-                True
-            )
+            {"agent": agent_name}
         )
 
         script_path = os.path.join(settings.fence_agent_binaries, agent_name)
@@ -296,16 +255,12 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
         provider = "provider/../provider2"
         agent = "agent"
 
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.AgentNotFound,
             lambda: lib_ra._get_ocf_resource_agent_metadata(
                 mock_runner, provider, agent
             ),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_RESOURCE_NAME,
-                {"agent_name": "ocf:{0}:{1}".format(provider, agent)},
-                True
-            )
+            {"agent": "ocf:{0}:{1}".format(provider, agent)}
         )
 
         mock_runner.run.assert_not_called()
@@ -317,16 +272,12 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
         provider = "provider"
         agent = "agent/../agent2"
 
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.AgentNotFound,
             lambda: lib_ra._get_ocf_resource_agent_metadata(
                 mock_runner, provider, agent
             ),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_RESOURCE_NAME,
-                {"agent_name": "ocf:{0}:{1}".format(provider, agent)},
-                True
-            )
+            {"agent": "ocf:{0}:{1}".format(provider, agent)}
         )
 
         mock_runner.run.assert_not_called()
@@ -338,16 +289,12 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
         provider = "provider"
         agent = "agent"
 
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.AgentNotFound,
             lambda: lib_ra._get_ocf_resource_agent_metadata(
                 mock_runner, provider, agent
             ),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_RESOURCE_NAME,
-                {"agent_name": "ocf:{0}:{1}".format(provider, agent)},
-                True
-            )
+            {"agent": "ocf:{0}:{1}".format(provider, agent)}
         )
 
         mock_runner.run.assert_not_called()
@@ -357,19 +304,15 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
         provider = "provider"
         agent = "agent"
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
-        mock_runner.run.return_value = ("", 1)
+        mock_runner.run.return_value = ("error", 1)
         mock_is_runnable.return_value = True
 
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.UnableToGetAgentMetadata,
             lambda: lib_ra._get_ocf_resource_agent_metadata(
                 mock_runner, provider, agent
             ),
-            (
-                Severities.ERROR,
-                report_codes.UNABLE_TO_GET_AGENT_METADATA,
-                {"agent_name": "ocf:{0}:{1}".format(provider, agent)},
-                True
-            )
+            {"agent": "ocf:{0}:{1}".format(provider, agent)}
         )
 
         script_path = os.path.join(settings.ocf_resources, provider, agent)
@@ -387,16 +330,12 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
         mock_runner.run.return_value = ("not xml", 0)
         mock_is_runnable.return_value = True
 
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.UnableToGetAgentMetadata,
             lambda: lib_ra._get_ocf_resource_agent_metadata(
                 mock_runner, provider, agent
             ),
-            (
-                Severities.ERROR,
-                report_codes.UNABLE_TO_GET_AGENT_METADATA,
-                {"agent_name": "ocf:{0}:{1}".format(provider, agent)},
-                True
-            )
+            {"agent": "ocf:{0}:{1}".format(provider, agent)}
         )
 
         script_path = os.path.join(settings.ocf_resources, provider, agent)
@@ -431,42 +370,30 @@ class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
 class GetNagiosResourceAgentMetadataTest(LibraryResourceTest):
     def test_relative_path_name(self):
         agent = "agent/../agent2"
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.AgentNotFound,
             lambda: lib_ra._get_nagios_resource_agent_metadata(agent),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_RESOURCE_NAME,
-                {"agent_name": "nagios:" + agent},
-                True
-            )
+            {"agent": "nagios:" + agent}
         )
 
     @mock.patch("lxml.etree.parse")
     def test_file_opening_exception(self, mock_obj):
         agent = "agent"
         mock_obj.side_effect = IOError()
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.UnableToGetAgentMetadata,
             lambda: lib_ra._get_nagios_resource_agent_metadata(agent),
-            (
-                Severities.ERROR,
-                report_codes.UNABLE_TO_GET_AGENT_METADATA,
-                {"agent_name": "nagios:" + agent},
-                True
-            )
+            {"agent": "nagios:" + agent}
         )
 
     @mock.patch("lxml.etree.parse")
     def test_invalid_xml(self, mock_obj):
         agent = "agent"
         mock_obj.side_effect = etree.XMLSyntaxError(None, None, None, None)
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.UnableToGetAgentMetadata,
             lambda: lib_ra._get_nagios_resource_agent_metadata(agent),
-            (
-                Severities.ERROR,
-                report_codes.UNABLE_TO_GET_AGENT_METADATA,
-                {"agent_name": "nagios:" + agent},
-                True
-            )
+            {"agent": "nagios:" + agent}
         )
 
     @mock.patch("lxml.etree.parse")
@@ -486,14 +413,9 @@ class GetNagiosResourceAgentMetadataTest(LibraryResourceTest):
 class GetAgentDescTest(LibraryResourceTest):
     def test_invalid_metadata_format(self):
         xml = "<xml />"
-        assert_raise_library_error(
-            lambda: lib_ra.get_agent_desc(etree.XML(xml)),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_METADATA_FORMAT,
-                {},
-                True
-            )
+        self.assertRaises(
+            lib_ra.InvalidMetadataFormat,
+            lambda: lib_ra.get_agent_desc(etree.XML(xml))
         )
 
     def test_no_desc(self):
@@ -576,25 +498,49 @@ class FilterFenceAgentParametersTest(LibraryResourceTest):
         self.assertEqual(
             [
                 {"name": "valid_param"},
+                {
+                    "name": "action",
+                    "required": False,
+                    "shortdesc":
+                        "\nWARNING: specifying 'action' is deprecated and not" +
+                        " necessary with current Pacemaker versions"
+                },
                 {"name": "another_param"}
             ],
             lib_ra._filter_fence_agent_parameters(params)
         )
 
+    def test_action(self):
+        params = [
+            {
+                "name": "action",
+                "required": True,
+                "shortdesc": "Action"
+            }
+        ]
+
+        self.assertEqual(
+            [
+                {
+                    "name": "action",
+                    "required": False,
+                    "shortdesc":
+                        "Action\nWARNING: specifying 'action' is deprecated " +
+                        "and not necessary with current Pacemaker versions"
+                }
+            ],
+            lib_ra._filter_fence_agent_parameters(params)
+        )
+
 
 class GetResourceAgentMetadata(LibraryResourceTest):
     def test_unsupported_class(self):
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
         agent = "class:provider:agent"
-
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.UnsupportedResourceAgent,
             lambda: lib_ra.get_resource_agent_metadata(mock_runner, agent),
-            (
-                Severities.ERROR,
-                report_codes.UNSUPPORTED_RESOURCE_AGENT,
-                {},
-                True
-            )
+            {"agent": agent}
         )
 
         mock_runner.run.assert_not_called()
@@ -603,14 +549,10 @@ class GetResourceAgentMetadata(LibraryResourceTest):
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
         agent = "ocf:agent"
 
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.UnsupportedResourceAgent,
             lambda: lib_ra.get_resource_agent_metadata(mock_runner, agent),
-            (
-                Severities.ERROR,
-                report_codes.UNSUPPORTED_RESOURCE_AGENT,
-                {},
-                True
-            )
+            {"agent": agent}
         )
 
         mock_runner.run.assert_not_called()
@@ -687,15 +629,12 @@ class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest):
     def test_failed_to_get_xml(self):
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
         mock_runner.run.return_value = ("", 1)
-        assert_raise_library_error(
+        self.assert_raises(
+            lib_ra.UnableToGetAgentMetadata,
             lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner),
-            (
-                Severities.ERROR,
-                report_codes.UNABLE_TO_GET_AGENT_METADATA,
-                {},
-                True
-            )
+            {"agent": "stonithd"}
         )
+
         mock_runner.run.assert_called_once_with(
             [settings.stonithd_binary, "metadata"], ignore_stderr=True
         )
@@ -703,15 +642,11 @@ class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest):
     def test_invalid_xml(self):
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
         mock_runner.run.return_value = ("invalid XML", 0)
-        assert_raise_library_error(
-            lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_METADATA_FORMAT,
-                {},
-                True
-            )
+        self.assertRaises(
+            lib_ra.InvalidMetadataFormat,
+            lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner)
         )
+
         mock_runner.run.assert_called_once_with(
             [settings.stonithd_binary, "metadata"], ignore_stderr=True
         )
@@ -741,38 +676,23 @@ class GetActionTest(LibraryResourceTest):
 
     def test_empty(self):
         xml = '<action />'
-        assert_raise_library_error(
-            lambda: lib_ra._get_action(etree.XML(xml)),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_METADATA_FORMAT,
-                {},
-                True
-            )
+        self.assertRaises(
+            lib_ra.InvalidMetadataFormat,
+            lambda: lib_ra._get_action(etree.XML(xml))
         )
 
     def test_no_name(self):
         xml = '<action param="value" another_param="same_value" />'
-        assert_raise_library_error(
-            lambda: lib_ra._get_action(etree.XML(xml)),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_METADATA_FORMAT,
-                {},
-                True
-            )
+        self.assertRaises(
+            lib_ra.InvalidMetadataFormat,
+            lambda: lib_ra._get_action(etree.XML(xml))
         )
 
     def test_not_action_element(self):
         xml = '<actions param="value" another_param="same_value" />'
-        assert_raise_library_error(
-            lambda: lib_ra._get_action(etree.XML(xml)),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_METADATA_FORMAT,
-                {},
-                True
-            )
+        self.assertRaises(
+            lib_ra.InvalidMetadataFormat,
+            lambda: lib_ra._get_action(etree.XML(xml))
         )
 
 
@@ -810,14 +730,9 @@ class GetAgentActionsTest(LibraryResourceTest):
                 </actions>
             </agent>
         """
-        assert_raise_library_error(
-            lambda: lib_ra._get_action(etree.XML(xml)),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_METADATA_FORMAT,
-                {},
-                True
-            )
+        self.assertRaises(
+            lib_ra.InvalidMetadataFormat,
+            lambda: lib_ra._get_action(etree.XML(xml))
         )
 
     def test_action_without_name(self):
@@ -831,14 +746,9 @@ class GetAgentActionsTest(LibraryResourceTest):
                 </actions>
             </resource-agent>
         """
-        assert_raise_library_error(
-            lambda: lib_ra._get_action(etree.XML(xml)),
-            (
-                Severities.ERROR,
-                report_codes.INVALID_METADATA_FORMAT,
-                {},
-                True
-            )
+        self.assertRaises(
+            lib_ra.InvalidMetadataFormat,
+            lambda: lib_ra._get_action(etree.XML(xml))
         )
 
     def test_empty_actions(self):
diff --git a/pcs/test/test_lib_sbd.py b/pcs/test/test_lib_sbd.py
new file mode 100644
index 0000000..54c5669
--- /dev/null
+++ b/pcs/test/test_lib_sbd.py
@@ -0,0 +1,596 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import json
+from unittest import TestCase
+
+from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_report_item_list_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+
+from pcs.common import report_codes
+from pcs.lib.errors import (
+    ReportItemSeverity as Severities,
+    ReportItem,
+    LibraryError,
+)
+from pcs.lib.node import NodeAddresses
+from pcs.lib.external import (
+    NodeCommunicator,
+    NodeCommunicationException,
+    NodeConnectionException,
+)
+import pcs.lib.sbd as lib_sbd
+
+
+class TestException(Exception):
+    pass
+
+
+class RunParallelAndRaiseLibErrorOnFailureTest(TestCase):
+    def test_no_report_items(self):
+        # test that no exception has been raised
+        lib_sbd._run_parallel_and_raise_lib_error_on_failure(
+            lambda: [],
+            [([], {}) for _ in range(5)]
+        )
+
+    def test_failures(self):
+        def report_item_generator(i):
+            if i == 1:
+                raise NodeConnectionException("node", "command", "reason")
+            elif i == 2:
+                raise LibraryError(
+                    ReportItem.error(
+                        report_codes.COMMON_ERROR,
+                        "another report"
+                    ),
+                    ReportItem.info(
+                        report_codes.COMMON_INFO,
+                        "just info"
+                    )
+                )
+
+        assert_raise_library_error(
+            lambda: lib_sbd._run_parallel_and_raise_lib_error_on_failure(
+                report_item_generator,
+                [([i], {}) for i in range(5)]
+            ),
+            (
+                Severities.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+                {
+                    "node": "node",
+                    "command": "command",
+                    "reason": "reason"
+                }
+            ),
+            (
+                Severities.ERROR,
+                report_codes.COMMON_ERROR,
+                {}
+            ),
+            (
+                Severities.INFO,
+                report_codes.COMMON_INFO,
+                {}
+            )
+        )
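+
+# The helper under test is assumed to call the given function once per
+# argument tuple (in parallel), turn each NodeCommunicationException
+# into its corresponding report item, collect the report items carried
+# by any LibraryError, and re-raise a single LibraryError aggregating
+# everything; that is what the expected report list above spells out.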
+
+
+class CheckSbdTest(TestCase):
+    def test_success(self):
+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        node = NodeAddresses("node1")
+        lib_sbd.check_sbd(mock_communicator, node, "/dev/watchdog")
+        mock_communicator.call_node.assert_called_once_with(
+            node, "remote/check_sbd", "watchdog=%2Fdev%2Fwatchdog"
+        )
+
+
+ at mock.patch("pcs.lib.sbd.check_sbd")
+class CheckSbdOnNodeTest(TestCase):
+    def setUp(self):
+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        self.mock_rep = MockLibraryReportProcessor()
+        self.node = NodeAddresses("node1")
+
+    def test_success(self, mock_check_sbd):
+        mock_check_sbd.return_value = json.dumps({
+            "sbd": {
+                "installed": True
+            },
+            "watchdog": {
+                "exist": True
+            }
+        })
+        # if no exception was raised, it's fine
+        lib_sbd.check_sbd_on_node(
+                self.mock_rep, self.mock_com, self.node, "watchdog"
+        )
+        mock_check_sbd.assert_called_once_with(
+            self.mock_com, self.node, "watchdog"
+        )
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.SBD_CHECK_SUCCESS,
+                {"node": self.node.label}
+            )]
+        )
+
+    def test_unable_to_connect(self, mock_check_sbd):
+        mock_check_sbd.side_effect = NodeConnectionException(
+            self.node.label, "command", "reason"
+        )
+        self.assertRaises(
+            NodeCommunicationException,
+            lambda: lib_sbd.check_sbd_on_node(
+                self.mock_rep, self.mock_com, self.node, "watchdog"
+            )
+        )
+        mock_check_sbd.assert_called_once_with(
+            self.mock_com, self.node, "watchdog"
+        )
+        self.assertEqual(0, len(self.mock_rep.report_item_list))
+
+    def test_data_parsing_error(self, mock_check_sbd):
+        mock_check_sbd.return_value = "invalid JSON"
+        assert_raise_library_error(
+            lambda: lib_sbd.check_sbd_on_node(
+                self.mock_rep, self.mock_com, self.node, "watchdog"
+            ),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESPONSE_FORMAT,
+                {"node": self.node.label}
+            )
+        )
+        mock_check_sbd.assert_called_once_with(
+            self.mock_com, self.node, "watchdog"
+        )
+        self.assertEqual(0, len(self.mock_rep.report_item_list))
+
+    def test_sbd_not_installed(self, mock_check_sbd):
+        mock_check_sbd.return_value = json.dumps({
+            "sbd": {
+                "installed": False
+            },
+            "watchdog": {
+                "exist": True
+            }
+        })
+        assert_raise_library_error(
+            lambda: lib_sbd.check_sbd_on_node(
+                self.mock_rep, self.mock_com, self.node, "watchdog"
+            ),
+            (
+                Severities.ERROR,
+                report_codes.SBD_NOT_INSTALLED,
+                {"node": self.node.label}
+            )
+        )
+        mock_check_sbd.assert_called_once_with(
+            self.mock_com, self.node, "watchdog"
+        )
+        self.assertEqual(0, len(self.mock_rep.report_item_list))
+
+    def test_watchdog_does_not_exist(self, mock_check_sbd):
+        mock_check_sbd.return_value = json.dumps({
+            "sbd": {
+                "installed": True
+            },
+            "watchdog": {
+                "exist": False
+            }
+        })
+        assert_raise_library_error(
+            lambda: lib_sbd.check_sbd_on_node(
+                self.mock_rep, self.mock_com, self.node, "watchdog"
+            ),
+            (
+                Severities.ERROR,
+                report_codes.WATCHDOG_NOT_FOUND,
+                {"node": self.node.label}
+            )
+        )
+        mock_check_sbd.assert_called_once_with(
+            self.mock_com, self.node, "watchdog"
+        )
+        self.assertEqual(0, len(self.mock_rep.report_item_list))
+
+    def test_watchdog_does_not_exist_and_sbd_not_installed(
+            self, mock_check_sbd
+    ):
+        mock_check_sbd.return_value = json.dumps({
+            "sbd": {
+                "installed": False
+            },
+            "watchdog": {
+                "exist": False
+            }
+        })
+        assert_raise_library_error(
+            lambda: lib_sbd.check_sbd_on_node(
+                self.mock_rep, self.mock_com, self.node, "watchdog"
+            ),
+            (
+                Severities.ERROR,
+                report_codes.WATCHDOG_NOT_FOUND,
+                {"node": self.node.label}
+            ),
+            (
+                Severities.ERROR,
+                report_codes.SBD_NOT_INSTALLED,
+                {"node": self.node.label}
+            )
+        )
+        mock_check_sbd.assert_called_once_with(
+            self.mock_com, self.node, "watchdog"
+        )
+        self.assertEqual(0, len(self.mock_rep.report_item_list))
+
+    def test_invalid_response_format(self, mock_check_sbd):
+        mock_check_sbd.return_value = json.dumps({
+            "not_sbd": {
+                "installed": False
+            },
+            "watchdog": {
+                "exist": False
+            }
+        })
+        assert_raise_library_error(
+            lambda: lib_sbd.check_sbd_on_node(
+                self.mock_rep, self.mock_com, self.node, "watchdog"
+            ),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESPONSE_FORMAT,
+                {"node": self.node.label}
+            )
+        )
+        mock_check_sbd.assert_called_once_with(
+            self.mock_com, self.node, "watchdog"
+        )
+        self.assertEqual(0, len(self.mock_rep.report_item_list))
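+
+# All of the cases above decode the node's JSON reply, assumed to look
+# like {"sbd": {"installed": bool}, "watchdog": {"exist": bool}}; a
+# missing or renamed key is reported as INVALID_RESPONSE_FORMAT rather
+# than being silently ignored.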
+
+
+ at mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
+class CheckSbdOnAllNodesTest(TestCase):
+    def test_success(self, mock_func):
+        mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        mock_rep = MockLibraryReportProcessor()
+        node_list = [NodeAddresses("node" + str(i)) for i in range(2)]
+        data = {
+            node_list[0]: "/dev/watchdog1",
+            node_list[1]: "/dev/watchdog2"
+        }
+        lib_sbd.check_sbd_on_all_nodes(mock_rep, mock_com, data)
+        items = sorted(data.items())
+        mock_func.assert_called_once_with(
+            lib_sbd.check_sbd_on_node,
+            [
+                ([mock_rep, mock_com, node, watchdog], {})
+                for node, watchdog in items
+            ]
+        )
+
+
+class SetSbdConfigTest(TestCase):
+    def test_success(self):
+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        node = NodeAddresses("node1")
+        cfg = """# This file has been generated by pcs.
+SBD_OPTS="-n node1"
+SBD_WATCHDOG_DEV=/dev/watchdog
+SBD_WATCHDOG_TIMEOUT=0
+"""
+        lib_sbd.set_sbd_config(mock_communicator, node, cfg)
+        cfg_url_encoded = "%23+This+file+has+been+generated+by+" \
+            "pcs.%0ASBD_OPTS%3D%22-n+node1%22%0ASBD_WATCHDOG_DEV%3D%2Fdev%2" \
+            "Fwatchdog%0ASBD_WATCHDOG_TIMEOUT%3D0%0A"
+        mock_communicator.call_node.assert_called_once_with(
+            node, "remote/set_sbd_config", "config=" + cfg_url_encoded
+        )
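+
+# The encoded payload above matches urllib-style form encoding (e.g.
+# urllib.parse.urlencode in Python 3) of the config text: spaces become
+# "+", and "#", newline, "=", '"' and "/" become %23, %0A, %3D, %22 and
+# %2F respectively.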
+
+
+ at mock.patch("pcs.lib.sbd.set_sbd_config")
+class SetSbdConfigOnNodeTest(TestCase):
+    def setUp(self):
+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        self.mock_rep = MockLibraryReportProcessor()
+        self.node = NodeAddresses("node1")
+
+    def test_success(self, mock_set_sbd_cfg):
+        cfg_in = {
+            "SBD_WATCHDOG_DEV": "/dev/watchdog",
+            "SBD_WATCHDOG_TIMEOUT": "0"
+        }
+        cfg_out = """# This file has been generated by pcs.
+SBD_OPTS="-n node1"
+SBD_WATCHDOG_DEV=/dev/watchdog
+SBD_WATCHDOG_TIMEOUT=0
+"""
+        lib_sbd.set_sbd_config_on_node(
+            self.mock_rep, self.mock_com, self.node, cfg_in
+        )
+        mock_set_sbd_cfg.assert_called_once_with(
+            self.mock_com, self.node, cfg_out
+        )
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.SBD_CONFIG_ACCEPTED_BY_NODE,
+                {"node": self.node.label}
+            )]
+        )
+
+
+ at mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
+class SetSbdConfigOnAllNodesTest(TestCase):
+    def test_success(self, mock_func):
+        mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        mock_rep = MockLibraryReportProcessor()
+        node_list = [NodeAddresses("node" + str(i)) for i in range(5)]
+        config = {
+            "opt1": "val1",
+            "opt2": "val2"
+        }
+        lib_sbd.set_sbd_config_on_all_nodes(
+            mock_rep, mock_com, node_list, config
+        )
+        mock_func.assert_called_once_with(
+            lib_sbd.set_sbd_config_on_node,
+            [([mock_rep, mock_com, node, config], {}) for node in node_list]
+        )
+
+
+class EnableSbdServiceTest(TestCase):
+    def test_success(self):
+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        node = NodeAddresses("node1")
+        lib_sbd.enable_sbd_service(mock_communicator, node)
+        mock_communicator.call_node.assert_called_once_with(
+            node, "remote/sbd_enable", ""
+        )
+
+
+class EnableSbdServiceOnNodeTest(TestCase):
+    def setUp(self):
+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        self.mock_rep = MockLibraryReportProcessor()
+        self.node = NodeAddresses("node1")
+
+    @mock.patch("pcs.lib.sbd.enable_sbd_service")
+    def test_success(self, mock_enable_sbd):
+        lib_sbd.enable_sbd_service_on_node(
+            self.mock_rep, self.mock_com, self.node
+        )
+        mock_enable_sbd.assert_called_once_with(self.mock_com, self.node)
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.SERVICE_ENABLE_SUCCESS,
+                {
+                    "service": "sbd",
+                    "node": self.node.label
+                }
+            )]
+        )
+
+
+ at mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
+class EnableSbdServiceOnAllNodes(TestCase):
+    def test_success(self, mock_func):
+        mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        mock_rep = MockLibraryReportProcessor()
+        node_list = [NodeAddresses("node" + str(i)) for i in range(5)]
+        lib_sbd.enable_sbd_service_on_all_nodes(mock_rep, mock_com, node_list)
+        mock_func.assert_called_once_with(
+            lib_sbd.enable_sbd_service_on_node,
+            [([mock_rep, mock_com, node], {}) for node in node_list]
+        )
+
+
+class DisableSbdServiceTest(TestCase):
+    def test_success(self):
+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        node = NodeAddresses("node1")
+        lib_sbd.disable_sbd_service(mock_communicator, node)
+        mock_communicator.call_node.assert_called_once_with(
+            node, "remote/sbd_disable", ""
+        )
+
+
+class DisableSbdServiceOnNodeTest(TestCase):
+    def setUp(self):
+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        self.mock_rep = MockLibraryReportProcessor()
+        self.node = NodeAddresses("node1")
+
+    @mock.patch("pcs.lib.sbd.disable_sbd_service")
+    def test_success(self, mock_disable_sbd):
+        lib_sbd.disable_sbd_service_on_node(
+            self.mock_rep, self.mock_com, self.node
+        )
+        mock_disable_sbd.assert_called_once_with(self.mock_com, self.node)
+        assert_report_item_list_equal(
+            self.mock_rep.report_item_list,
+            [(
+                Severities.INFO,
+                report_codes.SERVICE_DISABLE_SUCCESS,
+                {
+                    "service": "sbd",
+                    "node": self.node.label
+                }
+            )]
+        )
+
+
+ at mock.patch("pcs.lib.sbd._run_parallel_and_raise_lib_error_on_failure")
+class DisableSbdServiceOnAllNodes(TestCase):
+    def test_success(self, mock_func):
+        mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        mock_rep = MockLibraryReportProcessor()
+        node_list = [NodeAddresses("node" + str(i)) for i in range(5)]
+        lib_sbd.disable_sbd_service_on_all_nodes(mock_rep, mock_com, node_list)
+        mock_func.assert_called_once_with(
+            lib_sbd.disable_sbd_service_on_node,
+            [([mock_rep, mock_com, node], {}) for node in node_list]
+        )
+
+
+class SetStonithWatchdogTimeoutToZeroTest(TestCase):
+    def test_success(self):
+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        node = NodeAddresses("node1")
+        lib_sbd.set_stonith_watchdog_timeout_to_zero(mock_communicator, node)
+        mock_communicator.call_node.assert_called_once_with(
+            node, "remote/set_stonith_watchdog_timeout_to_zero", ""
+        )
+
+
+ at mock.patch("pcs.lib.sbd.set_stonith_watchdog_timeout_to_zero")
+class SetStonithWatchdogTimeoutToZeroOnAllNodesTest(TestCase):
+    def setUp(self):
+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        self.node_list = [NodeAddresses("node" + str(i)) for i in range(5)]
+
+    def test_success(self, mock_func):
+        lib_sbd.set_stonith_watchdog_timeout_to_zero_on_all_nodes(
+            self.mock_com, self.node_list
+        )
+        func_calls = [mock.call(self.mock_com, node) for node in self.node_list]
+        self.assertEqual(mock_func.call_count, len(func_calls))
+        mock_func.assert_has_calls(func_calls)
+
+    def test_communication_error(self, mock_func):
+        def raiser(_, node):
+            if node == self.node_list[1]:
+                raise NodeConnectionException(
+                    self.node_list[1], "command", "reason"
+                )
+            elif node == self.node_list[4]:
+                raise NodeCommunicationException(
+                    self.node_list[4], "command", "reason"
+                )
+
+        mock_func.side_effect = raiser
+        assert_raise_library_error(
+            lambda: lib_sbd.set_stonith_watchdog_timeout_to_zero_on_all_nodes(
+                self.mock_com, self.node_list
+            ),
+            (
+                Severities.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+                {
+                    "node": self.node_list[1],
+                    "command": "command",
+                    "reason": "reason"
+                }
+            ),
+            (
+                Severities.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR,
+                {
+                    "node": self.node_list[4],
+                    "command": "command",
+                    "reason": "reason"
+                }
+            )
+        )
+        func_calls = [mock.call(self.mock_com, node) for node in self.node_list]
+        self.assertEqual(mock_func.call_count, len(func_calls))
+        mock_func.assert_has_calls(func_calls)
+
+
+class RemoveStonithWatchdogTimeoutTest(TestCase):
+    def test_success(self):
+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        node = NodeAddresses("node1")
+        lib_sbd.remove_stonith_watchdog_timeout(mock_communicator, node)
+        mock_communicator.call_node.assert_called_once_with(
+            node, "remote/remove_stonith_watchdog_timeout", ""
+        )
+
+
+ at mock.patch("pcs.lib.sbd.remove_stonith_watchdog_timeout")
+class RemoveStonithWatchdogTimeoutOnAllNodesTest(TestCase):
+    def setUp(self):
+        self.mock_com = mock.MagicMock(spec_set=NodeCommunicator)
+        self.node_list = [NodeAddresses("node" + str(i)) for i in range(5)]
+
+    def test_success(self, mock_func):
+        lib_sbd.remove_stonith_watchdog_timeout_on_all_nodes(
+            self.mock_com, self.node_list
+        )
+        func_calls = [mock.call(self.mock_com, node) for node in self.node_list]
+        self.assertEqual(mock_func.call_count, len(func_calls))
+        mock_func.assert_has_calls(func_calls)
+
+    def test_communication_error(self, mock_func):
+        def raiser(_, node):
+            if node == self.node_list[1]:
+                raise NodeConnectionException(
+                    self.node_list[1], "command", "reason"
+                )
+            elif node == self.node_list[4]:
+                raise NodeCommunicationException(
+                    self.node_list[4], "command", "reason"
+                )
+
+        mock_func.side_effect = raiser
+        assert_raise_library_error(
+            lambda: lib_sbd.remove_stonith_watchdog_timeout_on_all_nodes(
+                self.mock_com, self.node_list
+            ),
+            (
+                Severities.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+                {
+                    "node": self.node_list[1],
+                    "command": "command",
+                    "reason": "reason"
+                }
+            ),
+            (
+                Severities.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR,
+                {
+                    "node": self.node_list[4],
+                    "command": "command",
+                    "reason": "reason"
+                }
+            )
+        )
+        func_calls = [mock.call(self.mock_com, node) for node in self.node_list]
+        self.assertEqual(mock_func.call_count, len(func_calls))
+        mock_func.assert_has_calls(func_calls)
+
+
+class GetSbdConfigTest(TestCase):
+    def test_success(self):
+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        node = NodeAddresses("node1")
+        lib_sbd.get_sbd_config(mock_communicator, node)
+        mock_communicator.call_node.assert_called_once_with(
+            node, "remote/get_sbd_config", ""
+        )
+
+
+ at mock.patch("pcs.lib.external.is_service_enabled")
+class IsSbdEnabledTest(TestCase):
+    def test_success(self, mock_is_service_enabled):
+        mock_obj = mock.MagicMock()
+        mock_is_service_enabled.return_value = True
+        self.assertTrue(lib_sbd.is_sbd_enabled(mock_obj))
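
The *_on_all_nodes tests above all pin down the same contract: the per-node
helper is invoked for every node even when some of the calls fail, and the
communication failures are collected and raised together instead of aborting
at the first one. A minimal, self-contained sketch of that pattern, with
stand-in exception types (the real code uses NodeCommunicationException and
LibraryError from pcs.lib and converts each exception into a report item):

    class NodeError(Exception):
        """Stand-in for pcs.lib.external.NodeCommunicationException."""

    class AggregateError(Exception):
        """Stand-in for pcs.lib.errors.LibraryError."""

    def run_on_all_nodes(per_node_func, communicator, node_list):
        # Call per_node_func for every node even if some calls fail, so a
        # single unreachable node does not hide failures on the others.
        errors = []
        for node in node_list:
            try:
                per_node_func(communicator, node)
            except NodeError as e:
                errors.append(e)
        if errors:
            # Raise all collected failures at once; the tests expect one
            # report item per failed node.
            raise AggregateError(*errors)
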
diff --git a/pcs/test/test_lib_tools.py b/pcs/test/test_lib_tools.py
new file mode 100644
index 0000000..5141ca9
--- /dev/null
+++ b/pcs/test/test_lib_tools.py
@@ -0,0 +1,44 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.lib import tools
+
+
+class EnvironmentFileToDictTest(TestCase):
+    def test_success(self):
+        data = """
+# ignored comment
+IgnoredOptionNoValue
+NUMBER=10
+OPTION=value # this is also value
+ANOTHER ONE="complex value"
+; another comment
+
+"""
+        expected = {
+            "NUMBER": "10",
+            "OPTION": "value # this is also value",
+            "ANOTHER ONE": '"complex value"'
+        }
+        self.assertEqual(expected, tools.environment_file_to_dict(data))
+
+
+class DictToEnvironmentFileTest(TestCase):
+    def test_success(self):
+        cfg_dict = {
+            "OPTION": "value",
+            "ANOTHER": "option value",
+            "ANOTHER ONE": '"complex value"'
+        }
+        expected = """# This file has been generated by pcs.
+ANOTHER=option value
+ANOTHER ONE="complex value"
+OPTION=value
+"""
+        self.assertEqual(expected, tools.dict_to_environment_file(cfg_dict))
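
The two tests fix the helpers' behavior precisely enough to read an
implementation off them: comment lines starting with '#' or ';' and lines
without '=' are skipped, values are split off at the first '=' and kept
verbatim, and the generated file starts with a fixed header followed by
KEY=value lines in sorted key order. A sketch consistent with the tests (the
actual code in pcs/lib/tools.py may differ in details):

    def environment_file_to_dict(config):
        # Skip comments and lines without '='; split on the first '=' only.
        data = {}
        for line in config.splitlines():
            stripped = line.strip()
            if not stripped or stripped.startswith(("#", ";")):
                continue
            if "=" not in stripped:
                continue
            key, value = stripped.split("=", 1)
            data[key.strip()] = value.strip()
        return data

    def dict_to_environment_file(config_dict):
        # A fixed header followed by KEY=value lines in sorted key order.
        lines = ["# This file has been generated by pcs.\n"]
        for key in sorted(config_dict):
            lines.append("{0}={1}\n".format(key, config_dict[key]))
        return "".join(lines)
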
diff --git a/pcs/test/test_quorum.py b/pcs/test/test_quorum.py
index b85b880..8167ad9 100644
--- a/pcs/test/test_quorum.py
+++ b/pcs/test/test_quorum.py
@@ -144,7 +144,7 @@ class DeviceAddTest(TestBase):
 
     def test_success_model_only(self):
         self.assert_pcs_success(
-            "quorum device add model net host=127.0.0.1"
+            "quorum device add model net host=127.0.0.1 algorithm=ffsplit"
         )
         self.assert_pcs_success(
             "quorum config",
@@ -152,13 +152,14 @@ class DeviceAddTest(TestBase):
 Options:
 Device:
   Model: net
+    algorithm: ffsplit
     host: 127.0.0.1
 """
         )
 
-    def test_succes_all_options(self):
+    def test_succes_generic_and_model_options(self):
         self.assert_pcs_success(
-            "quorum device add timeout=12345 model net host=127.0.0.1"
+            "quorum device add timeout=12345 model net host=127.0.0.1 algorithm=ffsplit"
         )
         self.assert_pcs_success(
             "quorum config",
@@ -167,6 +168,7 @@ Options:
 Device:
   timeout: 12345
   Model: net
+    algorithm: ffsplit
     host: 127.0.0.1
 """
         )
@@ -174,29 +176,35 @@ Device:
     def test_missing_required_options(self):
         self.assert_pcs_fail(
             "quorum device add model net",
-            "Error: required option 'host' is missing\n"
+            """\
+Error: required option 'algorithm' is missing
+Error: required option 'host' is missing
+"""
         )
         self.assert_pcs_fail(
             "quorum device add model net --force",
-            "Error: required option 'host' is missing\n"
+            """\
+Error: required option 'algorithm' is missing
+Error: required option 'host' is missing
+"""
         )
 
     def test_bad_options(self):
         self.assert_pcs_fail(
-            "quorum device add a=b timeout=-1 model net host=127.0.0.1 port=x c=d",
+            "quorum device add a=b timeout=-1 model net host=127.0.0.1 algorithm=x c=d",
             """\
+Error: 'x' is not a valid algorithm value, use 2nodelms, ffsplit, lms, use --force to override
 Error: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker, use --force to override
-Error: 'x' is not a valid port value, use 1-65535, use --force to override
 Error: invalid quorum device option 'a', allowed options are: sync_timeout, timeout, use --force to override
 Error: '-1' is not a valid timeout value, use positive integer, use --force to override
 """
         )
 
         self.assert_pcs_success(
-            "quorum device add a=b timeout=-1 model net host=127.0.0.1 port=x c=d --force",
+            "quorum device add a=b timeout=-1 model net host=127.0.0.1 algorithm=x c=d --force",
             """\
+Warning: 'x' is not a valid algorithm value, use 2nodelms, ffsplit, lms
 Warning: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker
-Warning: 'x' is not a valid port value, use 1-65535
 Warning: invalid quorum device option 'a', allowed options are: sync_timeout, timeout
 Warning: '-1' is not a valid timeout value, use positive integer
 """
@@ -209,9 +217,9 @@ Device:
   a: b
   timeout: -1
   Model: net
+    algorithm: x
     c: d
     host: 127.0.0.1
-    port: x
 """
         )
 
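The updated expectations encode the new validation rules for the net model:
'host' and 'algorithm' are both required (and stay errors even with --force),
the algorithm value must be one of 2nodelms, ffsplit, lms, and --force
downgrades invalid values to warnings. A hypothetical validator
reconstructing just those rules (the real checks live in the quorum library
code, which is not shown in this hunk):

    ALLOWED_ALGORITHMS = ("2nodelms", "ffsplit", "lms")

    def validate_net_model_options(model_options, force=False):
        # Missing required options are always errors; a bad algorithm
        # value becomes a warning when force is set.
        errors, warnings = [], []
        for required in ("algorithm", "host"):
            if required not in model_options:
                errors.append("required option '%s' is missing" % required)
        algorithm = model_options.get("algorithm")
        if algorithm and algorithm not in ALLOWED_ALGORITHMS:
            message = "'%s' is not a valid algorithm value, use %s" % (
                algorithm, ", ".join(ALLOWED_ALGORITHMS)
            )
            (warnings if force else errors).append(message)
        return errors, warnings
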
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index 3ccfe9b..e8c0813 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -210,7 +210,7 @@ the health of a system via IPMI.
  ClusterIP4\t(ocf::heartbeat:IPaddr2):\tStopped
  ClusterIP5\t(ocf::heartbeat:IPaddr2):\tStopped
  ClusterIP6\t(ocf::heartbeat:IPaddr2):\tStopped
- ClusterIP7\t(ocf::heartbeat:IPaddr2):\t(target-role:Stopped) Stopped
+ ClusterIP7\t(ocf::heartbeat:IPaddr2):\tStopped (disabled)
 """)
 
         output, returnVal = pcs(temp_cib, "resource show ClusterIP6 --full")
diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
index 1257399..479c8e9 100644
--- a/pcs/test/test_stonith.py
+++ b/pcs/test/test_stonith.py
@@ -26,10 +26,10 @@ class StonithTest(unittest.TestCase):
     def testStonithCreation(self):
         output, returnVal = pcs(temp_cib, "stonith create test1 fence_noxist")
         assert returnVal == 1
-        assert output == "Error: fence agent 'fence_noxist' not found, use --force to override\n"
+        assert output == "Error: Agent 'fence_noxist' not found, use --force to override\n"
 
         output, returnVal = pcs(temp_cib, "stonith create test1 fence_noxist --force")
-        ac(output, "Warning: fence agent 'fence_noxist' not found\n")
+        ac(output, "Warning: Agent 'fence_noxist' not found\n")
         self.assertEqual(returnVal, 0)
 
         output, returnVal = pcs(temp_cib, "stonith create test2 fence_apc")
diff --git a/pcs/test/tools/assertions.py b/pcs/test/tools/assertions.py
index 690a7d4..1151809 100644
--- a/pcs/test/tools/assertions.py
+++ b/pcs/test/tools/assertions.py
@@ -93,6 +93,33 @@ class AssertPcsMixin(object):
         return output
 
 
+class ExtendedAssertionsMixin(object):
+    def assert_raises(
+        self, expected_exception, callable_obj, property_dict=None
+    ):
+        if property_dict is None:
+            property_dict = {}
+        try:
+            callable_obj()
+            raise AssertionError(
+                "No exception raised. Expected exception: {exception}".format(
+                    exception=expected_exception.__name__
+                )
+            )
+        except expected_exception as e:
+            for prop, value in property_dict.items():
+                try:
+                    self.assertEqual(value, getattr(e, prop))
+                except AttributeError:
+                    raise AssertionError(
+                        "Property {property} doesn't exist in exception"
+                        " {exception}".format(
+                            property=prop,
+                            exception=expected_exception.__name__
+                        )
+                    )
+
+
 def assert_xml_equal(expected_xml, got_xml):
     checker = LXMLOutputChecker()
     if not checker.check_output(expected_xml, got_xml, 0):
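
A hypothetical test using the new mixin, to show the intended call shape —
assert_raises verifies the exception type and then compares the listed
attributes on the caught instance:

    from unittest import TestCase

    from pcs.test.tools.assertions import ExtendedAssertionsMixin

    class CustomError(Exception):
        def __init__(self, severity):
            super(CustomError, self).__init__(severity)
            self.severity = severity

    class CustomErrorTest(TestCase, ExtendedAssertionsMixin):
        def test_type_and_properties_are_checked(self):
            def boom():
                raise CustomError("fatal")
            # Fails if boom() does not raise, raises something else, or
            # the raised exception's 'severity' attribute differs.
            self.assert_raises(CustomError, boom, {"severity": "fatal"})
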
diff --git a/pcs/usage.py b/pcs/usage.py
index 50e1fd1..9d24b78 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -19,6 +19,7 @@ def full_usage():
     out += strip_extras(constraint([],False))
     out += strip_extras(node([],False))
     out += strip_extras(acl([],False))
+    out += strip_extras(qdevice([],False))
     out += strip_extras(quorum([],False))
     out += strip_extras(status([],False))
     out += strip_extras(config([],False))
@@ -108,6 +109,7 @@ def generate_completion_tree_from_usage():
     tree["property"] = generate_tree(property([],False))
     tree["acl"] = generate_tree(acl([],False))
     tree["constraint"] = generate_tree(constraint([],False))
+    tree["qdevice"] = generate_tree(qdevice([],False))
     tree["quorum"] = generate_tree(quorum([],False))
     tree["status"] = generate_tree(status([],False))
     tree["config"] = generate_tree(config([],False))
@@ -149,23 +151,24 @@ Usage: pcs [-f file] [-h] [commands]...
 Control and configure pacemaker and corosync.
 
 Options:
-    -h, --help  Display usage and exit
-    -f file     Perform actions on file instead of active CIB
-    --debug     Print all network traffic and external commands run
-    --version   Print pcs version information
+    -h, --help  Display usage and exit.
+    -f file     Perform actions on file instead of active CIB.
+    --debug     Print all network traffic and external commands run.
+    --version   Print pcs version information.
 
 Commands:
-    cluster     Configure cluster options and nodes
-    resource    Manage cluster resources
-    stonith     Configure fence devices
-    constraint  Set resource constraints
-    property    Set pacemaker properties
-    acl         Set pacemaker access control lists
-    quorum      Manage cluster quorum settings
-    status      View cluster status
-    config      View and manage cluster configuration
-    pcsd        Manage pcs daemon
-    node        Manage cluster nodes
+    cluster     Configure cluster options and nodes.
+    resource    Manage cluster resources.
+    stonith     Configure fence devices.
+    constraint  Set resource constraints.
+    property    Set pacemaker properties.
+    acl         Set pacemaker access control lists.
+    qdevice     Manage quorum device provider.
+    quorum      Manage cluster quorum settings.
+    status      View cluster status.
+    config      View and manage cluster configuration.
+    pcsd        Manage pcs daemon.
+    node        Manage cluster nodes.
 """
 # Advanced usage to possibly add later
 #  --corosync_conf=<corosync file> Specify alternative corosync.conf file
@@ -194,7 +197,7 @@ Commands:
         of resources are not printed.
 
     describe <standard:provider:type|type>
-        Show options for the specified resource
+        Show options for the specified resource.
 
     create <resource id> <standard:provider:type|type> [resource options]
            [op <operation action> <operation options> [<operation action>
@@ -214,7 +217,7 @@ Commands:
         to 60 minutes.
         Example: Create a new resource called 'VirtualIP' with IP address
             192.168.0.99, netmask of 32, monitored every 30 seconds,
-            on eth2.
+            on eth2:
             pcs resource create VirtualIP ocf:heartbeat:IPaddr2 \\
                 ip=192.168.0.99 cidr_netmask=32 nic=eth2 \\
                 op monitor interval=30s
@@ -320,14 +323,14 @@ Commands:
         specified it defaults to 60 minutes.
 
     standards
-        List available resource agent standards supported by this installation.
-        (OCF, LSB, etc.)
+        List available resource agent standards supported by this installation
+        (OCF, LSB, etc.).
 
     providers
-        List available OCF resource agent providers
+        List available OCF resource agent providers.
 
     agents [standard[:provider]]
-        List available agents optionally filtered by standard and provider
+        List available agents optionally filtered by standard and provider.
 
     update <resource id> [resource options] [op [<operation action>
            <operation options>]...] [meta <meta operations>...] [--wait[=n]]
@@ -343,18 +346,18 @@ Commands:
         otherwise.  If 'n' is not specified it defaults to 60 minutes.
 
     op add <resource id> <operation action> [operation properties]
-        Add operation for specified resource
+        Add operation for specified resource.
 
     op remove <resource id> <operation action> [<operation properties>...]
         Remove specified operation (note: you must specify the exact operation
         properties to properly remove an existing operation).
 
     op remove <operation id>
-        Remove the specified operation id
+        Remove the specified operation id.
 
     op defaults [options]
         Set default values for operations, if no options are passed, lists
-        currently configured defaults
+        currently configured defaults.
 
     meta <resource id | group id | master id | clone id> <meta options>
          [--wait[=n]]
@@ -386,7 +389,7 @@ Commands:
         specified it defaults to 60 minutes.
 
     ungroup <group id> [resource id] ... [resource id] [--wait[=n]]
-        Remove the group (Note: this does not remove any resources from the
+        Remove the group (note: this does not remove any resources from the
         cluster) or if resources are specified, remove the specified resources
         from the group.  If --wait is specified, pcs will wait up to 'n' seconds
         for the operation to finish (including moving resources if appropriate)
@@ -416,14 +419,14 @@ Commands:
         Note: to remove a master you must remove the resource/group it contains.
 
     manage <resource id> ... [resource n]
-        Set resources listed to managed mode (default)
+        Set resources listed to managed mode (default).
 
     unmanage <resource id> ... [resource n]
-        Set resources listed to unmanaged mode
+        Set resources listed to unmanaged mode.
 
     defaults [options]
         Set default values for resources, if no options are passed, lists
-        currently configured defaults
+        currently configured defaults.
 
     cleanup [<resource id>] [--node <node>]
         Cleans up the resource in the lrmd (useful to reset the resource status
@@ -436,7 +439,7 @@ Commands:
 
     failcount show <resource id> [node]
         Show current failcount for specified resource from all nodes or
-        only on specified node
+        only on specified node.
 
     failcount reset <resource id> [node]
         Reset failcount for specified resource on all nodes or only on
@@ -476,25 +479,25 @@ Commands:
 Examples:
 
     pcs resource show
-      Show all resources
+      Show all resources.
 
     pcs resource show VirtualIP
-      Show options specific to the 'VirtualIP' resource
+      Show options specific to the 'VirtualIP' resource.
 
 
     pcs resource create VirtualIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 \\
                cidr_netmask=32 nic=eth2 op monitor interval=30s
-      Create a new resource called 'VirtualIP' with options
+      Create a new resource called 'VirtualIP' with options.
 
     pcs resource create VirtualIP IPaddr2 ip=192.168.0.99 \\
                cidr_netmask=32 nic=eth2 op monitor interval=30s
-      Create a new resource called 'VirtualIP' with options
+      Create a new resource called 'VirtualIP' with options.
 
     pcs resource update VirtualIP ip=192.168.0.98 nic=
-      Change the ip address of VirtualIP and remove the nic option
+      Change the ip address of VirtualIP and remove the nic option.
 
     pcs resource delete VirtualIP
-      Delete the VirtualIP resource
+      Delete the VirtualIP resource.
 
 Notes:
     Starting resources on a cluster is (almost) always done by pacemaker and
@@ -549,14 +552,17 @@ Commands:
             'passive' is supported or tested (using 'active' is not
             recommended).
         The --wait_for_all, --auto_tie_breaker, --last_man_standing,
-        --last_man_standing_window options are all documented in corosync's
-        votequorum(5) man page.
-        --ipv6 will configure corosync to use ipv6 (instead of ipv4)
+            --last_man_standing_window options are all documented in corosync's
+            votequorum(5) man page. These options are not supported on CMAN
+            clusters.
+        --ipv6 will configure corosync to use ipv6 (instead of ipv4). This
+            option is not supported on CMAN clusters.
         --token <timeout> sets time in milliseconds until a token loss is
             declared after not receiving a token (default 1000 ms)
         --token_coefficient <timeout> sets time in milliseconds used for clusters
             with at least 3 nodes as a coefficient for real token timeout calculation
             (token + (number_of_nodes - 2) * token_coefficient) (default 650 ms)
+            This option is not supported on CMAN clusters.
         --join <timeout> sets time in milliseconds to wait for join messages
             (default 50 ms)
         --consensus <timeout> sets time in milliseconds to wait for consensus
@@ -599,7 +605,9 @@ Commands:
 
     kill
         Force corosync and pacemaker daemons to stop on the local node
-        (performs kill -9).
+        (performs kill -9). Note that the init system (e.g. systemd) may
+        detect that the cluster is not running and start it again. If you
+        want to stop the cluster on a node, run pcs cluster stop on that node.
 
     enable [--all] [node] [...]
         Configure corosync & pacemaker to run on node boot on specified
@@ -611,44 +619,36 @@ Commands:
         Configure corosync & pacemaker to not run on node boot on specified
         node(s), if node is not specified then corosync & pacemaker are
         disabled on the local node. If --all is specified then corosync &
-        pacemaker are disabled on all nodes. (Note: this is the default after
-        installation)
+        pacemaker are disabled on all nodes. Note: this is the default after
+        installation.
 
     remote-node add <hostname> <resource id> [options]
         Enables the specified resource as a remote-node resource on the
-        specified hostname (hostname should be the same as 'uname -n')
+        specified hostname (hostname should be the same as 'uname -n').
 
     remote-node remove <hostname>
         Disables any resources configured to be remote-node resource on the
-        specified hostname (hostname should be the same as 'uname -n')
+        specified hostname (hostname should be the same as 'uname -n').
 
     status
-        View current cluster status (an alias of 'pcs status cluster')
+        View current cluster status (an alias of 'pcs status cluster').
 
     pcsd-status [node] [...]
         Get current status of pcsd on nodes specified, or on all nodes
-        configured in corosync.conf if no nodes are specified
+        configured in corosync.conf if no nodes are specified.
 
     sync
         Sync corosync configuration to all nodes found from current
-        corosync.conf file (cluster.conf on systems running Corosync 1.x)
-
-    quorum unblock
-        Cancel waiting for all nodes when establishing quorum.  Useful in
-        situations where you know the cluster is inquorate, but you are
-        confident that the cluster should proceed with resource management
-        regardless.
+        corosync.conf file (cluster.conf on systems running Corosync 1.x).
 
     cib [filename] [scope=<scope> | --config]
-        Get the raw xml from the CIB (Cluster Information Base).  If a
-        filename is provided, we save the cib to that file, otherwise the cib
-        is printed.  Specify scope to get a specific section of the CIB.  Valid
+        Get the raw xml from the CIB (Cluster Information Base).  If a filename
+        is provided, we save the CIB to that file, otherwise the CIB is
+        printed.  Specify scope to get a specific section of the CIB.  Valid
         values of the scope are: configuration, nodes, resources, constraints,
-        crm_config, rsc_defaults, op_defaults, status.  --config is the same
-        as scope=configuration.  Use of --config is recommended.  Do not specify
-        a scope if you need to get the whole CIB or be warned in the case
-        of outdated CIB on cib-push.
-
+        crm_config, rsc_defaults, op_defaults, status.  --config is the same as
+        scope=configuration.  Do not specify a scope if you want to edit
+        the saved CIB using pcs (pcs -f <command>).
 
     cib-push <filename> [scope=<scope> | --config]
         Push the raw xml from <filename> to the CIB (Cluster Information Base).
@@ -665,7 +665,7 @@ Commands:
         current content of the specified file.
 
     cib-upgrade
-        Upgrade the CIB to conform to the latest version of the document schema
+        Upgrade the CIB to conform to the latest version of the document schema.
 
     edit [scope=<scope> | --config]
         Edit the cib in the editor specified by the $EDITOR environment
@@ -687,26 +687,26 @@ Commands:
 
     node remove <node>
         Shutdown specified node and remove it from pacemaker and corosync on
-        all other nodes in the cluster
+        all other nodes in the cluster.
 
     uidgid
         List the current configured uids and gids of users allowed to connect
-        to corosync
+        to corosync.
 
     uidgid add [uid=<uid>] [gid=<gid>]
         Add the specified uid and/or gid to the list of users/groups
-        allowed to connect to corosync
+        allowed to connect to corosync.
 
     uidgid rm [uid=<uid>] [gid=<gid>]
         Remove the specified uid and/or gid from the list of users/groups
-        allowed to connect to corosync
+        allowed to connect to corosync.
 
     corosync [node]
         Get the corosync.conf from the specified node or from the current node
-        if node not specified
+        if node not specified.
 
     reload corosync
-        Reload the corosync configuration on the current node
+        Reload the corosync configuration on the current node.
 
     destroy [--all]
         Permanently destroy the cluster on the current node, killing all
@@ -721,7 +721,7 @@ Commands:
         Checks the pacemaker configuration (cib) for syntax and common
         conceptual errors.  If no filename is specified the check is
         performed on the currently running cluster.  If -V is used
-        more verbose output will be printed
+        more verbose output will be printed.
 
     report [--from "YYYY-M-D H:M:S" [--to "YYYY-M-D" H:M:S"]] dest
         Create a tarball containing everything needed when reporting cluster
@@ -742,7 +742,7 @@ Commands:
     [show [stonith id]] [--full]
         Show all currently configured stonith devices or if a stonith id is
         specified show the options for the configured stonith device.  If
-        --full is specified all configured stonith options will be displayed
+        --full is specified all configured stonith options will be displayed.
 
     list [filter] [--nodesc]
         Show list of all available stonith agents (if filter is provided then
@@ -750,18 +750,18 @@ Commands:
         used then descriptions of stonith agents are not printed.
 
     describe <stonith agent>
-        Show options for specified stonith agent
+        Show options for specified stonith agent.
 
     create <stonith id> <stonith device type> [stonith device options]
            [op <operation action> <operation options> [<operation action>
            <operation options>]...] [meta <meta options>...]
-        Create stonith device with specified type and options
+        Create stonith device with specified type and options.
 
     update <stonith id> [stonith device options]
-        Add/Change options to specified stonith id
+        Add/Change options to specified stonith id.
 
     delete <stonith id>
-        Remove stonith id from configuration
+        Remove stonith id from configuration.
 
     cleanup [<stonith id>] [--node <node>]
         Cleans up the stonith device in the lrmd (useful to reset the status
@@ -773,7 +773,7 @@ Commands:
         on all nodes will be cleaned up.
 
     level
-        Lists all of the fencing levels currently configured
+        Lists all of the fencing levels currently configured.
 
     level add <level> <node> <devices>
         Add the fencing level for the specified node with a comma separated
@@ -784,8 +784,8 @@ Commands:
         fenced.
 
     level remove <level> [node id] [stonith id] ... [stonith id]
-        Removes the fence level for the level, node and/or devices specified
-        If no nodes or devices are specified then the fence level is removed
+        Removes the fence level for the level, node and/or devices specified.
+        If no nodes or devices are specified then the fence level is removed.
 
     level clear [node|stonith id(s)]
         Clears the fence levels on the node (or stonith id) specified or clears
@@ -794,19 +794,48 @@ Commands:
         spaces.  Example: pcs stonith level clear dev_a,dev_b
 
     level verify
-        Verifies all fence devices and nodes specified in fence levels exist
+        Verifies all fence devices and nodes specified in fence levels exist.
 
     fence <node> [--off]
         Fence the node specified (if --off is specified, use the 'off' API
-        call to stonith which will turn the node off instead of rebooting it)
+        call to stonith which will turn the node off instead of rebooting it).
 
-    confirm <node>
+    confirm <node> [--force]
         Confirm that the host specified is currently down.  This command
-        should ONLY be used when the node specified has already been
-        confirmed to be down.
+        should ONLY be used when the node specified has already been confirmed
+        to be powered off and to have no access to shared resources.
+
+        WARNING: If this node is not actually powered off or it does have
+        access to shared resources, data corruption/cluster failure can occur.
+        To prevent accidental running of this command, --force or interactive
+        user response is required in order to proceed.
+
+    sbd enable [--watchdog=<path>[@<node>]] ... [<SBD_OPTION>=<value>] ...
+        Enable SBD in cluster. Default path for watchdog device is
+        /dev/watchdog. Allowed SBD options: SBD_WATCHDOG_TIMEOUT (default: 5),
+        SBD_DELAY_START (default: no) and SBD_STARTMODE (default: clean).
+
+        WARNING: Cluster has to be restarted in order to apply these changes.
+
+        Example of enabling SBD in a cluster, with the watchdog on node1 set
+        to /dev/watchdog2, on node2 to /dev/watchdog1, and on all other nodes
+        to /dev/watchdog0, and with the watchdog timeout set to 10 seconds:
+        pcs stonith sbd enable \\
+            --watchdog=/dev/watchdog2@node1 \\
+            --watchdog=/dev/watchdog1@node2 \\
+            --watchdog=/dev/watchdog0 \\
+            SBD_WATCHDOG_TIMEOUT=10
+
+    sbd disable
+        Disable SBD in cluster.
+
+        WARNING: Cluster has to be restarted in order to apply these changes.
 
-        WARNING: if this node is not actually down data corruption/cluster
-        failure can occur.
+    sbd status
+        Show status of SBD services in cluster.
+
+    sbd config
+        Show SBD configuration in cluster.
 
 Examples:
     pcs stonith create MyStonith fence_virt pcmk_host_list=f1
@@ -863,11 +892,11 @@ Commands:
 
     location <resource id> prefers <node[=score]>...
         Create a location constraint on a resource to prefer the specified
-        node and score (default score: INFINITY)
+        node and score (default score: INFINITY).
 
     location <resource id> avoids <node[=score]>...
         Create a location constraint on a resource to avoid the specified
-        node and score (default score: INFINITY)
+        node and score (default score: INFINITY).
 
     location <resource id> rule [id=<rule id>] [resource-discovery=<option>]
              [role=master|slave] [constraint-id=<id>]
@@ -888,7 +917,7 @@ Commands:
         generated from the resource id. If resource-discovery is omitted it
         defaults to 'always'.
 
-    location show [resources|nodes [node id|resource id]...] [--full]
+    location [show [resources|nodes [node id|resource id]...] [--full]]
         List all the current location constraints, if 'resources' is specified
         location constraints are displayed per resource (default), if 'nodes'
         is specified location constraints are displayed per node.  If specific
@@ -897,13 +926,13 @@ Commands:
 
     location add <id> <resource id> <node> <score> [resource-discovery=<option>]
         Add a location constraint with the appropriate id, resource id,
-        node name and score. (For more advanced pacemaker usage)
+        node name and score. (For more advanced pacemaker usage.)
 
     location remove <id> [<resource id> <node> <score>]
         Remove a location constraint with the appropriate id, resource id,
-        node name and score. (For more advanced pacemaker usage)
+        node name and score. (For more advanced pacemaker usage.)
 
-    order show [--full]
+    order [show] [--full]
         List all current ordering constraints (if --full is specified show
         the internal constraint id's as well).
 
@@ -926,7 +955,7 @@ Commands:
     order remove <resource1> [resourceN]...
         Remove resource from any ordering constraint
 
-    colocation show [--full]
+    colocation [show] [--full]
         List all current colocation constraints (if --full is specified show
         the internal constraint id's as well).
 
@@ -951,12 +980,18 @@ Commands:
         score-attribute-mangle.
 
     colocation remove <source resource id> <target resource id>
-        Remove colocation constraints with <source resource>
+        Remove colocation constraints with specified resources.
 
-    ticket show [--full]
+    ticket [show] [--full]
         List all current ticket constraints (if --full is specified show
         the internal constraint id's as well).
 
+    ticket add <ticket> [<role>] <resource id> [options]
+               [id=constraint-id]
+        Create a ticket constraint for <resource id>.
+        Available option is loss-policy=fence/stop/freeze/demote.
+        A role can be master, slave, started or stopped.
+
     ticket set <resource1> [resourceN]... [options]
                [set <resourceX> ... [options]]
                [setoptions [constraint_options]]
@@ -964,19 +999,13 @@ Commands:
         Available options are sequential=true/false, require-all=true/false,
         action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.
         Required constraint option is ticket.
-        Optional constraint option is loss-policy=fence/stop/freeze/demote
-
-    ticket add <ticket> [<role>] <resource id> [options]
-               [id=constraint-id]
-        Crate a ticket constraint for <resource id>.
-        Available option is loss-policy=fence/stop/freeze/demote.
-        A role can be master, slave, started or stopped.
+        Optional constraint option is loss-policy=fence/stop/freeze/demote.
 
     remove [constraint id]...
-        Remove constraint(s) or constraint rules with the specified id(s)
+        Remove constraint(s) or constraint rules with the specified id(s).
 
     ref <resource>...
-        List constraints referencing specified resource
+        List constraints referencing specified resource.
 
     rule add <constraint id> [id=<rule id>] [role=master|slave]
              [score=<score>|score-attribute=<attribute>] <expression>
@@ -997,7 +1026,7 @@ Commands:
 
     rule remove <rule id>
         Remove a rule if a rule id is specified, if rule is last rule in its
-        constraint, the constraint will be removed
+        constraint, the constraint will be removed.
 """
     if pout:
         print(sub_usage(args, output))
@@ -1011,53 +1040,53 @@ View and modify current cluster access control lists
 Commands:
 
     [show]
-        List all current access control lists
+        List all current access control lists.
 
     enable
-        Enable access control lists
+        Enable access control lists.
 
     disable
-        Disable access control lists
+        Disable access control lists.
 
     role create <role id> [description=<description>] [((read | write | deny)
                                                 (xpath <query> | id <id>))...]
         Create a role with the id and (optional) description specified.
         Each role can also have an unlimited number of permissions
         (read/write/deny) applied to either an xpath query or the id
-        of a specific element in the cib
+        of a specific element in the cib.
 
     role delete <role id>
         Delete the role specified and remove it from any users/groups it was
-        assigned to
+        assigned to.
 
     role assign <role id> [to] <username/group>
         Assign a role to a user or group already created with 'pcs acl
-        user/group create'
+        user/group create'.
 
     role unassign <role id> [from] <username/group>
-        Remove a role from the specified user
+        Remove a role from the specified user.
 
     user create <username> <role id> [<role id>]...
-        Create an ACL for the user specified and assign roles to the user
+        Create an ACL for the user specified and assign roles to the user.
 
     user delete <username>
         Remove the user specified (and roles assigned will be unassigned for
-        the specified user)
+        the specified user).
 
     group create <group> <role id> [<role id>]...
-        Create an ACL for the group specified and assign roles to the group
+        Create an ACL for the group specified and assign roles to the group.
 
     group delete <group>
         Remove the group specified (and roles assigned will be unassigned for
-        the specified group)
+        the specified group).
 
     permission add <role id> ((read | write | deny) (xpath <query> |
                                                                 id <id>))...
-        Add the listed permissions to the role specified
+        Add the listed permissions to the role specified.
 
     permission delete <permission id>
         Remove the permission id specified (permission id's are listed in
+        parentheses after permissions in 'pcs acl' output).
+        parenthesis after permissions in 'pcs acl' output).
 """
     if pout:
         print(sub_usage(args, output))
@@ -1071,19 +1100,19 @@ View current cluster and resource status
 Commands:
     [status] [--full | --hide-inactive]
         View all information about the cluster and resources (--full provides
-        more details, --hide-inactive hides inactive resources)
+        more details, --hide-inactive hides inactive resources).
 
     resources
-        View current status of cluster resources
+        View current status of cluster resources.
 
     groups
-        View currently configured groups and their resources
+        View currently configured groups and their resources.
 
     cluster
-        View current cluster status
+        View current cluster status.
 
     corosync
-        View current membership information as seen by corosync
+        View current membership information as seen by corosync.
 
     nodes [corosync|both|config]
         View current status of nodes from pacemaker. If 'corosync' is
@@ -1096,7 +1125,7 @@ Commands:
         When no nodes are specified, status of all nodes is displayed.
 
     xml
-        View xml version of status (output from crm_mon -r -1 -X)
+        View xml version of status (output from crm_mon -r -1 -X).
 """
     if pout:
         print(sub_usage(args, output))
@@ -1110,7 +1139,7 @@ View and manage cluster configuration
 
 Commands:
     [show]
-        View full cluster configuration
+        View full cluster configuration.
 
     backup [filename]
         Creates the tarball containing the cluster configuration files.
@@ -1237,6 +1266,44 @@ Commands:
     else:
         return output
 
+def qdevice(args=[], pout=True):
+    output = """
+Usage: pcs qdevice <command>
+Manage quorum device provider on the local host
+
+Commands:
+    setup model <device model> [--enable] [--start]
+        Configure specified model of quorum device provider.  The quorum
+        device may then be added to clusters with the "pcs quorum device add"
+        command.
+        --start will also start the provider.  --enable will configure
+        the provider to start on boot.
+
+    destroy <device model>
+        Disable and stop specified model of quorum device provider and delete
+        its configuration files.
+
+    start <device model>
+        Start specified model of quorum device provider.
+
+    stop <device model>
+        Stop specified model of quorum device provider.
+
+    kill <device model>
+        Force specified model of quorum device provider to stop (performs
+        kill -9).
+
+    enable <device model>
+        Configure specified model of quorum device provider to start on boot.
+
+    disable <device model>
+        Configure specified model of quorum device provider to not start
+        on boot.
+"""
+    if pout:
+        print(sub_usage(args, output))
+    else:
+        return output
+
 def quorum(args=[], pout=True):
     output = """
 Usage: pcs quorum <command>
@@ -1247,7 +1314,8 @@ Commands:
         Show quorum configuration.
 
     device add [generic options] model <device model> [model options]
-        Add quorum device to cluster.
+        Add quorum device to cluster.  The quorum device needs to be created
+        first with the "pcs qdevice setup" command.
 
     device remove
         Remove quorum device from cluster.
@@ -1255,6 +1323,19 @@ Commands:
     device update [generic options] [model <model options>]
         Add/Change quorum device options.  Requires cluster to be stopped.
 
+    unblock [--force]
+        Cancel waiting for all nodes when establishing quorum.  Useful in
+        situations where you know the cluster is inquorate, but you are
+        confident that the cluster should proceed with resource management
+        regardless.  This command should ONLY be used when nodes which
+        the cluster is waiting for have been confirmed to be powered off and
+        to have no access to shared resources.
+
+        WARNING: If the nodes are not actually powered off or they do have
+        access to shared resources, data corruption/cluster failure can occur.
+        To prevent accidental running of this command, --force or interactive
+        user response is required in order to proceed.
+
     update [auto_tie_breaker=[0|1]] [last_man_standing=[0|1]]
             [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]]
         Add/Change quorum options.  At least one option must be specified.
@@ -1275,6 +1356,7 @@ def show(main_usage_name, rest_usage_names):
         "node": node,
         "pcsd": pcsd,
         "property": property,
+        "qdevice": qdevice,
         "quorum": quorum,
         "resource": resource,
         "status": status,
diff --git a/pcs/utils.py b/pcs/utils.py
index 9041fd4..11bd4cf 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -56,20 +56,26 @@ except ImportError:
 
 
 from pcs import settings, usage
+from pcs.common import report_codes
 from pcs.cli.common.reports import (
-    process_library_reports as process_lib_reports,
+    process_library_reports,
     LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
 )
 from pcs.common.tools import simple_cache
+from pcs.lib import reports
 from pcs.lib.env import LibraryEnvironment
-from pcs.lib.errors import LibraryError
+from pcs.lib.errors import LibraryError, ReportItemSeverity
 import pcs.lib.corosync.config_parser as corosync_conf_parser
 from pcs.lib.external import (
     is_cman_cluster,
     CommandRunner,
+    is_service_running,
+    is_service_enabled,
+    is_systemctl,
 )
 import pcs.lib.resource_agent as lib_ra
 from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade
+from pcs.lib.nodes_task import check_corosync_offline_on_nodes
 from pcs.lib.pacemaker import has_resource_wait_support
 from pcs.lib.pacemaker_state import ClusterState
 from pcs.lib.pacemaker_values import(
@@ -680,6 +686,51 @@ def autoset_2node_corosync(corosync_conf):
     facade._ConfigFacade__update_two_node()
     return facade.config
 
+# When adding or removing a node changes the number of nodes to or from two,
+# the qdevice algorithm has to be switched between lms and 2nodelms, which
+# cannot be done while the cluster is running.
+def check_qdevice_algorithm_and_running_cluster(corosync_conf, add=True):
+    if is_rhel6():
+        return
+    facade = corosync_conf_facade.from_string(corosync_conf)
+    if not facade.has_quorum_device():
+        return
+    node_list = facade.get_nodes()
+    node_count_target = len(node_list) + (1 if add else -1)
+    model, model_opts, dummy_generic_opts = facade.get_quorum_device_settings()
+    if model != "net":
+        return
+    algorithm = model_opts.get("algorithm", "")
+    need_stopped = (
+        (algorithm == "lms" and node_count_target == 2)
+        or
+        (algorithm == "2nodelms" and node_count_target != 2)
+    )
+    if not need_stopped:
+        return
+
+    try:
+        lib_env = get_lib_env()
+        check_corosync_offline_on_nodes(
+            lib_env.node_communicator(),
+            lib_env.report_processor,
+            node_list,
+            get_modificators()["skip_offline_nodes"]
+        )
+    except LibraryError as e:
+        report_item_list = list(e.args)
+        for report_item in report_item_list:
+            if (
+                report_item.code == report_codes.COROSYNC_RUNNING_ON_NODE
+                and
+                report_item.severity == ReportItemSeverity.ERROR
+            ):
+                report_item_list.append(
+                    reports.qdevice_remove_or_cluster_stop_needed()
+                )
+                break
+        process_library_reports(report_item_list)
+
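
Concretely: removing a node from a three-node cluster that uses a net quorum
device with algorithm=lms gives node_count_target == 2, so need_stopped is
true and the operation is refused while corosync is running (unless the
qdevice is removed first). The predicate, restated on its own for
illustration:

    def qdevice_change_needs_stopped_cluster(algorithm, node_count_target):
        # Restates the need_stopped condition from the function above.
        return (
            (algorithm == "lms" and node_count_target == 2)
            or (algorithm == "2nodelms" and node_count_target != 2)
        )

    assert qdevice_change_needs_stopped_cluster("lms", 2)       # 3 -> 2 nodes
    assert qdevice_change_needs_stopped_cluster("2nodelms", 3)  # 2 -> 3 nodes
    assert not qdevice_change_needs_stopped_cluster("ffsplit", 2)
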
 def getNextNodeID(corosync_conf):
     currentNodes = []
     highest = 0
@@ -1402,6 +1453,10 @@ def get_default_op_values(ra_type):
         lib_ra.UnableToGetAgentMetadata
     ):
         return []
+    except lib_ra.ResourceAgentLibError as e:
+        process_library_reports(
+            [lib_ra.resource_agent_lib_error_to_report_item(e)]
+        )
     except LibraryError as e:
         process_library_reports(e.args)
 
@@ -1446,7 +1501,7 @@ def is_valid_resource(resource, caseInsensitiveCheck=False):
         return True
     except lib_ra.UnsupportedResourceAgent:
         pass
-    except LibraryError:
+    except (lib_ra.ResourceAgentLibError, LibraryError):
         # agent not exists or obtaining metadata failed
         return False
 
@@ -1485,7 +1540,7 @@ def is_valid_resource(resource, caseInsensitiveCheck=False):
                     "ocf:{0}:{1}".format(provider, resource)
                 )
                 return True
-            except LibraryError:
+            except (LibraryError, lib_ra.ResourceAgentLibError):
                 continue
     return False
 
@@ -1778,6 +1833,13 @@ def stonithCheck():
         if p.attrib["class"] == "stonith":
             return False
 
+    # check if SBD daemon is running
+    try:
+        if is_service_running(cmd_runner(), "sbd"):
+            return False
+    except LibraryError:
+        pass
+
     return True
 
 def getCorosyncNodesID(allow_failure=False):
@@ -1969,16 +2031,6 @@ def verify_cert_key_pair(cert, key):
 
     return errors
 
-def is_systemctl():
-    systemctl_paths = [
-        '/usr/bin/systemctl',
-        '/bin/systemctl',
-        '/var/run/systemd/system',
-    ]
-    for path in systemctl_paths:
-        if os.path.exists(path):
-            return True
-    return False
 
 @simple_cache
 def is_rhel6():
@@ -1990,12 +2042,6 @@ def err(errorText, exit_after_error=True):
         sys.exit(1)
 
 
-def process_library_reports(report_item_list):
-    """
-    report_item_list list of ReportItem
-    """
-    process_lib_reports(report_item_list, "--force" in pcs_options)
-
 def serviceStatus(prefix):
     if not is_systemctl():
         return
@@ -2006,6 +2052,17 @@ def serviceStatus(prefix):
             run(["systemctl", 'is-active', service])[0].strip(),
             run(["systemctl", 'is-enabled', service])[0].strip()
         ))
+    try:
+        sbd_running = is_service_running(cmd_runner(), "sbd")
+        sbd_enabled = is_service_enabled(cmd_runner(), "sbd")
+        if sbd_enabled or sbd_running:
+            print("{prefix}sbd: {active}/{enabled}".format(
+                prefix=prefix,
+                active=("active" if sbd_running else "inactive"),
+                enabled=("enabled" if sbd_enabled else "disabled")
+            ))
+    except LibraryError:
+        pass
 
 def enableServices():
     if is_rhel6():
@@ -2594,11 +2651,14 @@ def get_modificators():
     #there is possible create class extending dict, so dict like access in
     #commands is not an issue
     return {
-        "full": "--full" in pcs_options,
         "autocorrect": "--autocorrect" in pcs_options,
+        "corosync_conf": pcs_options.get("--corosync_conf", None),
+        "enable": "--enable" in pcs_options,
         "force": "--force" in pcs_options,
+        "full": "--full" in pcs_options,
         "skip_offline_nodes": "--skip-offline" in pcs_options,
-        "corosync_conf": pcs_options.get("--corosync_conf", None),
+        "start": "--start" in pcs_options,
+        "watchdog": pcs_options.get("--watchdog", []),
     }
 
 def exit_on_cmdline_input_errror(error, main_name, usage_name):
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index 6b2cc84..2fa34af 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -43,7 +43,7 @@ def get_pcs_path(pcsd_path)
   end
 end
 
-PCS_VERSION = '0.9.151'
+PCS_VERSION = '0.9.152'
 COROSYNC = COROSYNC_BINARIES + "corosync"
 ISRHEL6 = is_rhel6
 ISSYSTEMCTL = is_systemctl
diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
index 1da29e2..f54cd30 100644
--- a/pcsd/cluster_entity.rb
+++ b/pcsd/cluster_entity.rb
@@ -1000,7 +1000,7 @@ module ClusterEntity
   class Node < JSONable
     attr_accessor :id, :error_list, :warning_list, :status, :quorum, :uptime,
                   :name, :corosync, :pacemaker, :cman, :corosync_enabled,
-                  :pacemaker_enabled, :pcsd_enabled
+                  :pacemaker_enabled, :pcsd_enabled, :services, :sbd_config
 
     def initialize
       @id = nil
@@ -1010,22 +1010,40 @@ module ClusterEntity
       @quorum = nil
       @uptime = 'unknown'
       @name = nil
+      @services = {}
+      [:pacemaker, :corosync, :pcsd, :cman, :sbd].each do |service|
+        @services[service] = {
+          :installed => nil,
+          :running => nil,
+          :enabled => nil
+        }
+      end
       @corosync = false
       @pacemaker = false
       @cman = false
       @corosync_enabled = false
       @pacemaker_enabled = false
       @pcsd_enabled = false
+      @sbd_config = nil
     end
 
     def self.load_current_node(crm_dom=nil)
       node = ClusterEntity::Node.new
-      node.corosync = corosync_running?
-      node.corosync_enabled = corosync_enabled?
-      node.pacemaker = pacemaker_running?
-      node.pacemaker_enabled = pacemaker_enabled?
-      node.cman = cman_running?
-      node.pcsd_enabled = pcsd_enabled?
+      node.services.each do |service, info|
+        info[:running] = is_service_running?(service.to_s)
+        info[:enabled] = is_service_enabled?(service.to_s)
+        if ISSYSTEMCTL
+          # temporary solution
+          # is_service_installed is implemented only for systemd systems
+          info[:installed] = is_service_installed?(service.to_s)
+        end
+      end
+      node.corosync = node.services[:corosync][:running]
+      node.corosync_enabled = node.services[:corosync][:enabled]
+      node.pacemaker = node.services[:pacemaker][:running]
+      node.pacemaker_enabled = node.services[:pacemaker][:enabled]
+      node.cman = node.services[:cman][:running]
+      node.pcsd_enabled = node.services[:pcsd][:enabled]
 
       node_online = (node.corosync and node.pacemaker)
       node.status =  node_online ? 'online' : 'offline'
@@ -1044,7 +1062,7 @@ module ClusterEntity
       else
         node.status = 'offline'
       end
-
+      node.sbd_config = get_parsed_local_sbd_config()
       return node
     end
   end
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 85cb95c..415e02a 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -16,6 +16,9 @@ require 'resource.rb'
 require 'cluster_entity.rb'
 require 'auth.rb'
 
+class NotImplementedException < NotImplementedError
+end
+
 def getAllSettings(auth_user, cib_dom=nil)
   unless cib_dom
     cib_dom = get_cib_dom(auth_user)
@@ -128,6 +131,40 @@ def add_order_set_constraint(
   return retval, stderr.join(' ')
 end
 
+
+def add_ticket_constraint(
+    auth_user, ticket, resource_id, role, loss_policy,
+    force=false, autocorrect=true
+)
+  command = [PCS, "constraint", "ticket", "add", ticket]
+  if role
+    command << role
+  end
+  command << resource_id
+  command << 'loss-policy=' + loss_policy unless loss_policy.strip().empty?()
+  command << '--force' if force
+  command << '--autocorrect' if autocorrect
+  stdout, stderr, retval = run_cmd(auth_user, *command)
+  return retval, stderr.join(' ')
+end
+
+def add_ticket_set_constraint(
+  auth_user, ticket, loss_policy, resource_set_list, force=false,
+  autocorrect=true
+)
+  command = [PCS, 'constraint', 'ticket']
+  resource_set_list.each { |resource_set|
+    command << 'set'
+    command.concat(resource_set)
+  }
+  command << 'setoptions'
+  command << 'ticket=' + ticket
+  command << 'loss-policy=' + loss_policy unless loss_policy.strip().empty?()
+  command << '--force' if force
+  stdout, stderr, retval = run_cmd(auth_user, *command)
+  return retval, stderr.join(' ')
+end
+
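For reference, a worked example of what add_ticket_constraint assembles: with
ticket 'ticketA', role 'Master', resource 'rsc1' and loss_policy 'fence', and
with force left false and autocorrect at its default of true, the helper runs:

    pcs constraint ticket add ticketA Master rsc1 loss-policy=fence --autocorrect
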
 def add_colocation_constraint(
   auth_user, resourceA, resourceB, score, force=false, autocorrect=true
 )
@@ -405,9 +442,12 @@ def send_request(auth_user, node, request, post=false, data={}, remote=true, raw
   end
 end
 
-def add_node(auth_user, new_nodename, all=false, auto_start=true)
+def add_node(auth_user, new_nodename, all=false, auto_start=true, watchdog=nil)
   if all
     command = [PCS, "cluster", "node", "add", new_nodename]
+    if watchdog and not watchdog.strip.empty?
+      command << "--watchdog=#{watchdog.strip}"
+    end
     if auto_start
       command << '--start'
       command << '--enable'
@@ -818,24 +858,6 @@ def disable_cluster(auth_user)
   return true
 end
 
-def corosync_running?()
-  if ISSYSTEMCTL
-    `systemctl status corosync.service`
-  else
-    `service corosync status`
-  end
-  return $?.success?
-end
-
-def corosync_enabled?()
-  if ISSYSTEMCTL
-    `systemctl is-enabled corosync.service`
-  else
-    `chkconfig corosync`
-  end
-  return $?.success?
-end
-
 def get_corosync_version()
   begin
     stdout, stderror, retval = run_cmd(
@@ -854,21 +876,7 @@ def get_corosync_version()
 end
 
 def pacemaker_running?()
-  if ISSYSTEMCTL
-    `systemctl status pacemaker.service`
-  else
-    `service pacemaker status`
-  end
-  return $?.success?
-end
-
-def pacemaker_enabled?()
-  if ISSYSTEMCTL
-    `systemctl is-enabled pacemaker.service`
-  else
-    `chkconfig pacemaker`
-  end
-  return $?.success?
+  is_service_running?('pacemaker')
 end
 
 def get_pacemaker_version()
@@ -888,15 +896,6 @@ def get_pacemaker_version()
   return nil
 end
 
-def cman_running?()
-  if ISSYSTEMCTL
-    `systemctl status cman.service`
-  else
-    `service cman status`
-  end
-  return $?.success?
-end
-
 def get_cman_version()
   begin
     stdout, stderror, retval = run_cmd(
@@ -936,15 +935,6 @@ def pcsd_restart()
   }
 end
 
-def pcsd_enabled?()
-  if ISSYSTEMCTL
-    `systemctl is-enabled pcsd.service`
-  else
-    `chkconfig pcsd`
-  end
-  return $?.success?
-end
-
 def get_pcsd_version()
   return PCS_VERSION.split(".").collect { | x | x.to_i }
 end
@@ -1425,6 +1415,7 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
     :status => 'unknown',
     :node_list => [],
     :resource_list => [],
+    :available_features => [],
   }
 
   threads = []
@@ -1453,6 +1444,7 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
       }
       begin
         parsed_response = JSON.parse(response, {:symbolize_names => true})
+        parsed_response[:available_features] ||= []
         if parsed_response[:noresponse]
           node_map[node][:node] = {}
           node_map[node][:node].update(node_status_unknown)
@@ -1537,6 +1529,31 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
     }
   end
   status.delete(:node)
+  sbd_enabled = []
+  sbd_running = []
+  sbd_disabled_node_list = []
+  node_map.each { |_, cluster_status|
+    node_status = cluster_status[:node][:status]
+    node_name = cluster_status[:node][:name]
+    # Build the set of features available on all nodes, i.e. the
+    # intersection of the feature lists reported by the individual nodes.
+    if node_status != 'unknown' and cluster_status[:available_features]
+      status[:available_features] &= cluster_status[:available_features]
+    end
+    if (
+      cluster_status[:node][:services] and
+      cluster_status[:node][:services][:sbd]
+    )
+      if cluster_status[:node][:services][:sbd][:enabled]
+        sbd_enabled << node_name
+      else
+        sbd_disabled_node_list << node_name if node_status != 'unknown'
+      end
+      if cluster_status[:node][:services][:sbd][:running]
+        sbd_running << node_name
+      end
+    end
+  }
 
   if status[:quorate]
     fence_count = 0
@@ -1545,9 +1562,9 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
         fence_count += 1
       end
     }
-    if fence_count == 0
+    if fence_count == 0 and sbd_enabled.empty?
       status[:warning_list] << {
-        :message => 'No fence devices configured in the cluster',
+        :message => 'No fencing configured in the cluster',
       }
     end
 
@@ -1557,6 +1574,30 @@ def cluster_status_from_nodes(auth_user, cluster_nodes, cluster_name)
         :message => 'Stonith is not enabled',
       }
     end
+    if not sbd_enabled.empty? and not sbd_disabled_node_list.empty?
+      status[:warning_list] << {
+        :message =>
+          "SBD is not enabled on node(s) #{sbd_disabled_node_list.join(', ')}",
+        :type => 'sbd_not_enabled_on_all_nodes',
+        :node_list => sbd_disabled_node_list
+      }
+    end
+    if not sbd_enabled.empty? and sbd_running.empty?
+      # If SBD is running on at least one node, it has to be running on all
+      # online/standby nodes in the cluster (it is impossible to have an
+      # online node without SBD running; pacemaker will shut down or fail
+      # to start in such a case).
+      status[:warning_list] << {
+        :message =>
+          'SBD is enabled but not running. Restart of cluster is required.',
+      }
+    end
+    if sbd_enabled.empty? and not sbd_running.empty?
+      status[:warning_list] << {
+        :message =>
+          'SBD is disabled but still running. Restart of cluster is required.',
+      }
+    end
   end
 
   if not_authorized_nodes.length > 0
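
The three SBD warnings reduce to comparing which nodes have SBD enabled with
which have it running. A compact restatement in Python, for illustration only
(the shipped code is the Ruby above):

    def sbd_warnings(enabled_nodes, running_nodes, disabled_nodes):
        # enabled_nodes / running_nodes / disabled_nodes are lists of node
        # names collected from the per-node status, as in the Ruby code.
        warnings = []
        if enabled_nodes and disabled_nodes:
            warnings.append(
                "SBD is not enabled on node(s) %s" % ", ".join(disabled_nodes)
            )
        if enabled_nodes and not running_nodes:
            warnings.append(
                "SBD is enabled but not running. "
                "Restart of cluster is required."
            )
        if not enabled_nodes and running_nodes:
            warnings.append(
                "SBD is disabled but still running. "
                "Restart of cluster is required."
            )
        return warnings
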
@@ -1639,7 +1680,8 @@ def get_node_status(auth_user, cib_dom)
       :fence_levels => get_fence_levels(auth_user, cib_dom),
       :node_attr => node_attrs_to_v2(get_node_attributes(auth_user, cib_dom)),
       :nodes_utilization => get_nodes_utilization(cib_dom),
-      :known_nodes => []
+      :known_nodes => [],
+      :available_features => ['sbd', 'ticket_constraints']
   }
 
   nodes = get_nodes_status()
@@ -1848,3 +1890,101 @@ def get_default_overview_node_list(clustername)
   }
   return node_list
 end
+
+def is_service_enabled?(service)
+  if ISSYSTEMCTL
+    cmd = ['systemctl', 'is-enabled', "#{service}.service"]
+  else
+    cmd = ['chkconfig', service]
+  end
+  _, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), *cmd)
+  return (retcode == 0)
+end
+
+def is_service_running?(service)
+  if ISSYSTEMCTL
+    cmd = ['systemctl', 'status', "#{service}.service"]
+  else
+    cmd = ['service', service, 'status']
+  end
+  _, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), *cmd)
+  return (retcode == 0)
+end
+
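+# Returns true/false, or nil when the list of services cannot be obtained.
+# With systemd the service has to show up in 'pcs resource list systemd'
+# output as a line like 'systemd:sbd'; with sysv init the 'chkconfig'
+# listing is scanned instead.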
+def is_service_installed?(service)
+  unless ISSYSTEMCTL
+    stdout, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), 'chkconfig')
+    if retcode != 0
+      return nil
+    end
+    stdout.each { |line|
+      if line.split(' ')[0] == service
+        return true
+      end
+    }
+    return false
+  end
+  stdout, _, retcode = run_cmd(
+    PCSAuth.getSuperuserAuth(), PCS, 'resource', 'list', 'systemd'
+  )
+  if retcode != 0
+    return nil
+  end
+  stdout.each { |line|
+    if line.strip() == "systemd:#{service}"
+      return true
+    end
+  }
+  return false
+end
+
+def enable_service(service)
+  if ISSYSTEMCTL
+    # fails when the service is not installed
+    cmd = ['systemctl', 'enable', "#{service}.service"]
+  else
+    # fails when the service is not installed
+    cmd = ['chkconfig', service, 'on']
+  end
+  _, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), *cmd)
+  return (retcode == 0)
+end
+
+def disable_service(service)
+  if ISSYSTEMCTL
+    # returns success even if the service is not installed
+    cmd = ['systemctl', 'disable', "#{service}.service"]
+  else
+    if not is_service_installed?(service)
+      return true
+    end
+    # fails when the service is not installed, so we need to check it beforehand
+    cmd = ['chkconfig', service, 'off']
+  end
+  _, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), *cmd)
+  return (retcode == 0)
+end
+
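+# Sets a cluster property, e.g.:
+#   pcs property set stonith-watchdog-timeout=0 --force
+# When pacemaker is not running, the property is written directly into the
+# CIB file ('-f' + CIB_PATH) under the superuser account instead.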
+def set_cluster_prop_force(auth_user, prop, val)
+  cmd = [PCS, 'property', 'set', "#{prop}=#{val}", '--force']
+  if pacemaker_running?
+    _, _, retcode = run_cmd(auth_user, *cmd)
+  else
+    cmd += ['-f', CIB_PATH]
+    _, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), *cmd)
+  end
+  return (retcode == 0)
+end
+
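+# Returns the local SBD configuration parsed into a hash, e.g. (illustrative
+# values): {"SBD_STARTMODE" => "clean", "SBD_WATCHDOG_TIMEOUT" => "5"};
+# returns nil when the config cannot be obtained or parsed.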
+def get_parsed_local_sbd_config()
+  cmd = [PCS, 'stonith', 'sbd', 'local_config_in_json']
+  out, _, retcode = run_cmd(PCSAuth.getSuperuserAuth(), *cmd)
+  if retcode != 0
+    return nil
+  end
+  begin
+    return JSON.parse(out.join(' '))
+  rescue JSON::ParserError
+    return nil
+  end
+end
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index 6bf7db6..d3032cf 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -1087,17 +1087,21 @@ already been added to pcsd.  You may not add two clusters with the same name int
         '<input placeholder="(?<shortdesc>[^"]*)"'
     )
 
-    desc = desc_regex.match(out)
-    unless desc
-      return [400, 'Unable to get meta-data of specified fence agent.']
-    end
     result = {
       :name => fence_agent,
-      :shortdesc => html2plain(desc[:short]),
-      :longdesc => html2plain(desc[:long]),
+      :shortdesc => '',
+      :longdesc => '',
       :parameters => []
     }
 
+    # pcsd in version 0.9.137 (and older) does not provide description for
+    # fence agents
+    desc = desc_regex.match(out)
+    if desc
+      result[:shortdesc] = html2plain(desc[:short])
+      result[:longdesc] = html2plain(desc[:long])
+    end
+
     parameters = parameters_regex.match(out)
     parameters[:required].scan(required_parameter_regex) { |match|
       result[:parameters] << {
diff --git a/pcsd/public/css/style.css b/pcsd/public/css/style.css
index 95535f0..d41b164 100644
--- a/pcsd/public/css/style.css
+++ b/pcsd/public/css/style.css
@@ -548,6 +548,52 @@ table.datatable {
   border-style: solid;
 }
 
+/* dark data table*/
+
+.darkdatatable {
+  border-spacing: 0px;
+}
+
+.darkdatatable td, .darkdatatable th {
+  font-family: "Liberation";
+  font-size: 11px;
+  padding: 5px;
+}
+
+.darkdatatable td, .darkdatatable th {
+  text-align: left;
+  padding-left: 10px;
+}
+
+.darkdatatable th {
+  font-weight: bold;
+  color: #ffffff;
+  border-bottom: 1px solid #898989;
+  background: #232323;
+}
+
+.darkdatatable td {
+  background: #191919;
+  border: 0px;
+}
+
+table.darkdatatable td, table.darkdatatable a {
+  color: #ffffff;
+}
+
+table.darkdatatable {
+  border-width: 1px 1px 1px 1px;
+  border-color: #898989;
+  border-style: solid;
+}
+
+.darkdatatable_header {
+  padding-left: 3px;
+  font-family: "Overpass";
+  font-size: 14px;
+  color: #c9c9c9;
+}
+
 .link {
   cursor:pointer;
   font-family: "Liberation";
@@ -618,6 +664,11 @@ tr td a.remove {
   width: 16px;
 }
 
+.inverted {
+  filter: invert(100%);
+  -webkit-filter: invert(100%);
+}
+
 .x { background-position: -32px -1px; }
 .xdark { background-position: -32px -17px; }
 .plus { background-position: -48px -1px; }
@@ -632,6 +683,7 @@ tr td a.remove {
 .unstandby { background-position: -368px -16px; }
 .cancel { background-position: -160px -17px; }
 .configure { background-position: -64px -16px; }
+.configurelight { background-position: -64px 0px; }
 .arrow { background-position: -544px -16px; }
 .move { background-position: -16px -17px; }
 .history { background-position: -256px -17px; }
@@ -639,6 +691,8 @@ tr td a.remove {
 .downarrow { background-position: -528px -1px; }
 .rightarrow { background-position: -544px -1px; }
 .infoicon {background-position: -256px -1px; }
+.Xdark { background-position: -160px -16px; }
+.questionmarkdark { background-position: -336px -16px; }
 
 .menuheader { position: relative; }
 .menuheader .label {
@@ -733,7 +787,7 @@ li.menuheader {
   background-image: url(images/pbar-ani.gif);
 }
 
-#add_resource td, #add_stonith td, #create_new_cluster_form td {
+#new_resource_agent td, #new_stonith_agent td, #create_new_cluster_form td {
   color: white;
 }
 
@@ -790,3 +844,7 @@ li.menuheader {
 table.args-table td.reg {
   width: 17em;
 }
+
+.constraint-ticket-add-attribute {
+  vertical-align: top;
+}
diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
index 4ff4ebc..bf1bb92 100644
--- a/pcsd/public/js/nodes-ember.js
+++ b/pcsd/public/js/nodes-ember.js
@@ -38,6 +38,35 @@ Pcs = Ember.Application.createWithMixins({
     if (this.cur_page == "wizards") return "display: table-row;";
     else return "display: none;";
   }.property("cur_page"),
+  available_features: [],
+  is_sbd_supported: function() {
+    return (this.get("available_features").indexOf("sbd") != -1);
+  }.property("available_features"),
+  is_ticket_constraints_supported: function(){
+    return (
+      this.get("available_features").indexOf("ticket_constraints") != -1
+    );
+  }.property("available_features"),
+  is_sbd_running: false,
+  is_sbd_enabled: false,
+  is_sbd_enabled_or_running: function() {
+    return (this.get("is_sbd_enabled") || this.get("is_sbd_running"));
+  }.property("is_sbd_enabled", "is_sbd_running"),
+  sbd_config: null,
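+  // renders the shared SBD config as a two-column OPTION/VALUE html table,
+  // hiding the per-node options SBD_OPTS, SBD_WATCHDOG_DEV and SBD_PACEMAKER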
+  sbd_config_table: function() {
+    if (!this.get("sbd_config")) {
+      return "no configuration obtained";
+    }
+    var out =
+      '<table class="darkdatatable"><tr><th>OPTION</th><th>VALUE</th></tr>\n';
+    var banned_options = ["SBD_OPTS", "SBD_WATCHDOG_DEV", "SBD_PACEMAKER"];
+    $.each(this.get("sbd_config"), function(opt, val) {
+      if (banned_options.indexOf(opt) == -1) {
+        out += '<tr><td>' + opt + '</td><td>' + val + '</td></tr>\n';
+      }
+    });
+    return out + '</table>';
+  }.property("sbd_config"),
 
   getResourcesFromID: function(resources) {
     var retArray = [];
@@ -112,6 +141,10 @@ Pcs = Ember.Application.createWithMixins({
         Pcs.set("cluster_settings",data.cluster_settings);
         Pcs.set('need_ring1_address', false);
         Pcs.set('is_cman_with_udpu_transport', false);
+        Pcs.set(
+          'available_features',
+          data['available_features'] ? data['available_features'] : []
+        );
         if (data['need_ring1_address']) {
           Pcs.set('need_ring1_address', true);
         }
@@ -260,6 +293,10 @@ Pcs.ParametersTableComponent = Ember.Component.extend({
   show_title: true,
   table_name: "",
   table_id: "",
+  table_id_suffix: "",
+  table_id_full: function() {
+    return this.get("table_id") + this.get("table_id_suffix");
+  }.property("table_id", "table_id_suffix"),
   content_style: function() {
     return ("display: " + (this.get("show_content") ? "block" : "none"));
   }.property("show_content"),
@@ -440,90 +477,6 @@ Pcs.resourcesContainer = Ember.Object.create({
     return family;
   },
 
-  get_constraints: function(cons) {
-    var ord_con = {};
-    var loc_con = {};
-    var col_con = {};
-    var ord_set_con = {};
-    var res_loc_constraints = {};
-    var res_ord_constraints = {};
-    var res_ord_set_constraints = {};
-    var res_col_constraints = {};
-    if (cons) {
-      if (cons["rsc_location"]) {
-        $.each(cons["rsc_location"], function (key, value) {
-          loc_con[value["id"]] = value;
-        });
-      }
-      if (cons["rsc_order"]) {
-        $.each(cons["rsc_order"], function (key, value) {
-          if (value["sets"]) {
-            ord_set_con[value["id"]] = value;
-          }
-          else {
-            ord_con[value["id"]] = value;
-          }
-        });
-      }
-      if (cons["rsc_colocation"]) {
-        $.each(cons["rsc_colocation"], function (key, value) {
-          col_con[value["id"]] = value;
-        });
-      }
-    }
-
-    $.each(loc_con, function (key, value) {
-      res_loc_constraints[value["rsc"]] = res_loc_constraints[value["rsc"]] || [];
-      res_loc_constraints[value["rsc"]].push(value);
-    });
-    $.each(ord_con, function (key, value) {
-      first = $.extend({"other_rsc":value["then"],"before":false}, value);
-      if (value["first"] in res_ord_constraints)
-        res_ord_constraints[value["first"]].push(first);
-      else res_ord_constraints[value["first"]] = [first];
-      then = $.extend({"other_rsc":value["first"],"before":true}, value);
-      if (value["then"] in res_ord_constraints)
-        res_ord_constraints[value["then"]].push(then);
-      else res_ord_constraints[value["then"]] = [then];
-    });
-
-    $.each(ord_set_con, function(key, set_con) {
-      $.each(set_con["sets"], function(key, set) {
-        $.each(set["resources"], function(key, resource) {
-          res_ord_set_constraints[resource] = res_ord_set_constraints[resource] || [];
-          if (res_ord_set_constraints[resource].indexOf(set_con) != -1) {
-            return;
-          }
-          res_ord_set_constraints[resource].push(set_con);
-        })
-      })
-    });
-
-    $.each(col_con, function (key, value) {
-      if (value["score"] == "INFINITY")
-        value["together"] = "Together";
-      else if (value["score"] == "-INFINITY" || value["score"] < 0)
-        value["together"] = "Apart";
-      else if (value["score"] >= 0)
-        value["together"] = "Together";
-
-      first = $.extend({"other_rsc":value["with-rsc"],"first":true}, value);
-      if (value["rsc"] in res_col_constraints)
-        res_col_constraints[value["rsc"]].push(first);
-      else res_col_constraints[value["rsc"]] = [first];
-      second = $.extend({"other_rsc":value["rsc"],"first":false}, value);
-      if (value["with-rsc"] in res_col_constraints)
-        res_col_constraints[value["with-rsc"]].push(second);
-      else res_col_constraints[value["with-rsc"]] = [second];
-    });
-    return {
-      "location_constraints": res_loc_constraints,
-      "ordering_constraints": res_ord_constraints,
-      "ordering_set_constraints": res_ord_set_constraints,
-      "colocation_constraints": res_col_constraints
-    };
-  },
-
   update_meta_attr: function(resource_id, attr, value) {
     value = typeof value !== 'undefined' ? value.trim() : "";
     var data = {
@@ -674,7 +627,7 @@ Pcs.resourcesContainer = Ember.Object.create({
     self.delete_unused_resources("fence_list", top_resource_map);
     self.delete_unused_resources("resource_list", top_resource_map);
 
-    var constraints = self.get_constraints(data["constraints"]);
+    var constraints = constraint_resort(data["constraints"]);
     self.set('constraints', constraints);
     var resource_map = self.get('resource_map');
     $.each(constraints, function(const_type, cons) {
@@ -1259,7 +1212,7 @@ Pcs.ACLsRoute = Ember.Route.extend({
 
 Pcs.ConfigurationRoute = Ember.Route.extend({
   setupController: function(controller, model) {
-    select_menu("CONFIGURE"); 
+    select_menu("CONFIGURE");
   }
 });
 
@@ -1268,7 +1221,7 @@ Pcs.ResourcesRoute = Ember.Route.extend({
     if (model) {
       select_menu("RESOURCES",model.name);
     } else {
-      select_menu("RESOURCES"); 
+      select_menu("RESOURCES");
     }
   },
   model: function(params) {
@@ -1472,6 +1425,54 @@ Pcs.Clusternode = Ember.Object.extend({
   pcsd: null,
   corosync_daemon: null,
   pacemaker_daemon: null,
+  services: [],
+  sbd_config: null,
+  sbd_status: function() {
+    if (this.get("services") && this.get("services")["sbd"]) {
+      return this.get("services")["sbd"];
+    } else {
+      return {
+        installed: null,
+        enabled: null,
+        running: null
+      };
+    }
+  }.property("services"),
+  is_sbd_enabled: function() {
+    return this.get("sbd_status").enabled;
+  }.property("sbd_status"),
+  is_sbd_running: function() {
+    return this.get("sbd_status").running;
+  }.property("sbd_status"),
+  is_sbd_installed: function() {
+    return this.get("sbd_status").installed;
+  }.property("sbd_status"),
+  sbd_status_str: function() {
+    var running = 'Stopped';
+    var status_class = 'status-offline';
+    if (this.get("is_sbd_running") == null) {
+      running = 'Unknown';
+      status_class = 'status-unknown';
+    } else if (this.get("is_sbd_running")) {
+      status_class = 'status';
+      running = 'Running';
+    }
+    var starting = 'Disabled';
+    if (this.get("is_sbd_enabled") == null) {
+      starting = 'Unknown';
+    } else if (this.get("is_sbd_enabled")) {
+      starting = 'Enabled';
+    }
+    return '<span id="sbd_status" style="float:left" class="' + status_class
+      + '">' + running + ' (' + starting + ')</span>';
+  }.property("is_sbd_enabled", "is_sbd_enabled"),
+  sbd_watchdog: function() {
+    if (this.get("sbd_config") && this.get("sbd_config")["SBD_WATCHDOG_DEV"]) {
+      return this.get("sbd_config")["SBD_WATCHDOG_DEV"];
+    } else {
+      return "<unkown>";
+    }
+  }.property("sbd_config")
 });
 
 Pcs.Aclrole = Ember.Object.extend({
@@ -1964,7 +1965,7 @@ Pcs.settingsController.reopen({
   filtered: function() {
     var self = this;
     var substr = self.get("filter").toLowerCase();
-    
+
     var to_show = [];
     $.each(self.get("properties"), function(_, e) {
       if (self.get("show_advanced")) {
@@ -1977,7 +1978,7 @@ Pcs.settingsController.reopen({
     if (!substr) {
       return to_show;
     }
-    
+
     var filtered = [];
     $.each(to_show, function(_, e) {
       if (e.get("name").toLowerCase().includes(substr) || e.get("readable_name").toLowerCase().includes(substr)) {
@@ -2030,6 +2031,14 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({
       window.location.hash = "/nodes/" + $(node_row).attr("nodeID");
   },
 
+  get_node_name_list: function() {
+    var node_list = [];
+    $.each(this.content, function(_, node) {
+      node_list.push(node.name);
+    });
+    return node_list;
+  },
+
   update: function(data){
     var self = this;
     var nodes = [];
@@ -2078,7 +2087,21 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({
       self.set("utilization_support", false);
     }
 
+    var is_sbd_enabled = false;
+    var is_sbd_running = false;
+    var sbd = null;
+    Pcs.set("sbd_config", null);
     $.each(data['node_list'], function(_, node_obj) {
+      if (node_obj["services"] && node_obj["services"]["sbd"]) {
+        sbd = node_obj["services"]["sbd"];
+        is_sbd_enabled = (is_sbd_enabled || sbd.enabled);
+        is_sbd_running = (is_sbd_running || sbd.running);
+      }
+
+      if (node_obj["sbd_config"]) {
+        Pcs.set("sbd_config", node_obj["sbd_config"]);
+      }
+
       var node_id = node_obj.name;
       if ($.inArray(node_id, corosync_nodes_online) > -1) {
         corosync_online = true;
@@ -2153,6 +2176,8 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({
           node.set("fence_levels", data["fence_levels"]);
           node.set("status", node_obj["status"]);
           node.set("utilization", utilization);
+          node.set("services", node_obj["services"]);
+          node.set("sbd_config", node_obj["sbd_config"]);
         }
       });
 
@@ -2178,7 +2203,9 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({
           node_attrs: node_attr,
           fence_levels: data["fence_levels"],
           status: node_obj["status"],
-          utilization: utilization
+          utilization: utilization,
+          services: node_obj["services"],
+          sbd_config: node_obj["sbd_config"]
         });
       }
       var pathname = window.location.pathname.split('/');
@@ -2204,6 +2231,9 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({
       self.content[0].set("cur_node",true);
     }
 
+    Pcs.set("is_sbd_enabled", is_sbd_enabled);
+    Pcs.set("is_sbd_running", is_sbd_running);
+
     nodesToRemove = [];
     $.each(self.content, function (key, node) {
       found = false;
@@ -2233,3 +2263,151 @@ Pcs.set('updater', Pcs.Updater.create({
   update_function: Pcs._update,
   update_target: Pcs
 }));
+
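+// shallow-merges all arguments into a fresh object; later arguments win,
+// e.g. constraint_extend({a: 1}, {a: 2, b: 3}) yields {a: 2, b: 3}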
+function constraint_extend(){
+  var new_object = {};
+  for(var i in arguments){
+    var extension = arguments[i];
+    Object.keys(extension).forEach(function(key){
+      new_object[key] = extension[key];
+    });
+  }
+  return new_object;
+}
+
+function constraint_set_create_resource_keyed_map(constraint){
+  var groups = {};
+  constraint.sets.forEach(function(resource_set){
+    resource_set.resources.forEach(function(resource_id){
+      groups[resource_id] = constraint;
+    })
+  });
+  return groups;
+}
+
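+// distributes an order constraint to both of its resources,
+// e.g. {first: "A", then: "B"} yields
+//   {A: {..., other_rsc: "B", before: false},
+//    B: {..., other_rsc: "A", before: true}}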
+function constraint_order_create_resource_keyed_map(constraint){
+  var groups = {};
+  groups[constraint["first"]] = constraint_extend(constraint, {
+    "other_rsc": constraint["then"],
+    "before":false
+  });
+  groups[constraint["then"]] = constraint_extend(constraint, {
+    "other_rsc": constraint["first"],
+    "before":true
+  });
+  return groups;
+}
+
+function constraint_colocation_create_resource_keyed_map(constraint){
+  var together = {};
+  if(constraint.score == "INFINITY" || constraint.score >= 0){
+    together.together = "Together";
+  }
+  if(constraint.score == "-INFINITY" || constraint.score < 0){
+    together.together = "Apart";
+  }
+
+  var groups = {};
+  groups[constraint["rsc"]] = constraint_extend(constraint, together, {
+    "other_rsc": constraint["with-rsc"],
+    "first": true
+  });
+
+  groups[constraint["with-rsc"]] = constraint_extend(constraint, together, {
+    "other_rsc": constraint["rsc"],
+    "first": false
+  });
+  return groups;
+}
+
+function constraint_location_distribute_to_resource(constraint){
+  var groups = {};
+  groups[constraint["rsc"]] = constraint;
+  return groups;
+}
+
+function constraint_ticket_distribute_to_resource(constraint){
+  var groups = {};
+  groups[constraint["rsc"]] = constraint;
+  return groups;
+}
+
+/**
+  Returns an object with a nested object under each attribute ("with_sets",
+  "plain"). Each nested object maps a resource id to the list of related
+  constraints. Example: {
+    with_sets: {"resA": [{constraint}, ...], "resB": [{constraint}, ...]},
+    plain: {"resA": [{constraint}, ...]}
+  }
+
+  @param {array} constraint_list list of constraints to distribute
+  @param {object} group_distributors distribution methods under the
+    attributes "with_sets" and "plain"; constraints whose attribute has no
+    distributor are skipped
+*/
+function constraint_resort_part(constraint_list, group_distributors){
+  var constraint_groups = {with_sets: {}, plain: {}};
+
+  if( ! constraint_list){
+    return constraint_groups;
+  }
+
+  constraint_list.forEach(function(constraint){
+    var group_name = constraint.sets ? "with_sets" : "plain";
+    var group = constraint_groups[group_name];
+    var distribute = group_distributors[group_name];
+
+    if( ! distribute){
+      return;
+    }
+
+    var resource_constraint_map = distribute(constraint);
+
+    for(var resource_id in resource_constraint_map){
+      var extended_constraint = resource_constraint_map[resource_id];
+      group[resource_id] = group[resource_id] || [];
+      if(group[resource_id].indexOf(extended_constraint) == -1){
+        group[resource_id].push(extended_constraint);
+      }
+    }
+  });
+
+  return constraint_groups;
+}
+
+function constraint_resort(constraints){
+  if( ! constraints){
+    return {
+      location_constraints: {},
+      ordering_constraints: {},
+      ordering_set_constraints: {},
+      colocation_constraints: {},
+    };
+  }
+
+  var orders = constraint_resort_part(constraints.rsc_order, {
+    plain: constraint_order_create_resource_keyed_map,
+    with_sets: constraint_set_create_resource_keyed_map,
+  });
+
+  var colocations = constraint_resort_part(constraints.rsc_colocation, {
+    plain: constraint_colocation_create_resource_keyed_map,
+  });
+
+  var locations = constraint_resort_part(constraints.rsc_location, {
+    plain: constraint_location_distribute_to_resource,
+  });
+
+  var tickets = constraint_resort_part(constraints.rsc_ticket, {
+    plain: constraint_ticket_distribute_to_resource,
+    with_sets: constraint_set_create_resource_keyed_map,
+  });
+
+  return {
+    location_constraints: locations.plain,
+    ordering_constraints: orders.plain,
+    ordering_set_constraints: orders.with_sets,
+    ticket_constraints: tickets.plain,
+    ticket_set_constraints: tickets.with_sets,
+    colocation_constraints: colocations.plain,
+  };
+}
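+
+// e.g. constraint_resort({rsc_ticket: [{id: "t1", rsc: "A", ticket: "T"}]})
+// yields {ticket_constraints: {A: [{id: "t1", rsc: "A", ticket: "T"}]}}
+// with all the other attributes set to empty objects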
diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
index 254f390..41c481e 100644
--- a/pcsd/public/js/pcsd.js
+++ b/pcsd/public/js/pcsd.js
@@ -262,9 +262,9 @@ function create_resource(form, update, stonith) {
         Pcs.update();
         if (!update) {
           if (stonith)
-            $('#add_stonith').dialog('close');
+            $('#new_stonith_agent').dialog('close');
           else
-            $('#add_resource').dialog('close');
+            $('#new_resource_agent').dialog('close');
         } else {
           reload_current_resource();
         }
@@ -456,7 +456,7 @@ function setNodeStatus(node, running) {
     $('.node_name:contains("'+node+'")').css('color','red');
   }
 }
-  
+
 
 function fade_in_out(id) {
   $(id).fadeTo(1000, 0.01, function() {
@@ -1470,18 +1470,35 @@ function add_meta_attr(parent_id) {
   Pcs.resourcesContainer.update_meta_attr(resource_id, attr, value);
 }
 
+
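+// collects form values for the given constraint type, e.g. for "ticket":
+//   {ticket: "ticketA", role: "Master", "loss-policy": "fence"};
+// any other type falls back to the location/order/colocation fields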
+function add_constraint_prepare_data(parent_id, constraint_type){
+  var value = function(sibling){
+    var form_value = $(parent_id + " " + sibling).val();
+    return form_value ? form_value.trim() : form_value;
+  };
+  switch(constraint_type){
+    case "ticket": return {
+      ticket: value("input[name='ticket']"),
+      role: value("select[name='role']"),
+      "loss-policy": value("select[name='loss-policy']"),
+    };
+  }
+  return {
+    rule: value("input[name='node_id']"),
+    score: value("input[name='score']"),
+    target_res_id: value("input[name='target_res_id']"),
+    order: value("select[name='order']"),
+    target_action: value("select[name='target_action']"),
+    res_action: value("select[name='res_action']"),
+    colocation_type: value("select[name='colocate']"),
+  };
+}
+
 function add_constraint(parent_id, c_type, force) {
-  var data = {};
+  var data = add_constraint_prepare_data(parent_id, c_type);
   data["disable_autocorrect"] = true;
   data["res_id"] = Pcs.resourcesContainer.cur_resource.get('id');
   data["node_id"] = $(parent_id + " input[name='node_id']").val();
-  data["rule"] = $(parent_id + " input[name='node_id']").val();
-  data["score"] = $(parent_id + " input[name='score']").val();
-  data["target_res_id"] = $(parent_id + " input[name='target_res_id']").val();
-  data["order"] = $(parent_id + " select[name='order']").val();
-  data["target_action"] = $(parent_id + " select[name='target_action']").val();
-  data["res_action"] = $(parent_id + " select[name='res_action']").val();
-  data["colocation_type"] = $(parent_id + " select[name='colocate']").val();
   data["c_type"] = c_type;
   if (force) {
     data["force"] = force;
@@ -1527,10 +1544,21 @@ function add_constraint(parent_id, c_type, force) {
   });
 }
 
+function add_constraint_set_get_options(parent_id, constraint_type){
+  switch(constraint_type){
+    case "ticket": return {
+      ticket: $(parent_id + " input[name='ticket']").val().trim(),
+      "loss-policy": $(parent_id + " select[name='loss-policy']").val().trim(),
+    };
+  }
+  return {};
+}
+
 function add_constraint_set(parent_id, c_type, force) {
   var data = {
     resources: [],
-    disable_autocorrect: true
+    disable_autocorrect: true,
+    options: {},
   };
   $(parent_id + " input[name='resource_ids[]']").each(function(index, element) {
     var resources = element.value.trim();
@@ -1538,6 +1566,7 @@ function add_constraint_set(parent_id, c_type, force) {
       data['resources'].push(resources.split(/\s+/));
     }
   });
+  data.options = add_constraint_set_get_options(parent_id, c_type);
   data["c_type"] = c_type;
   if (force) {
     data["force"] = force;
@@ -1613,6 +1642,11 @@ function remove_constraint(id) {
   });
 }
 
+function remove_constraint_action(remover_element){
+  remove_constraint($(remover_element).parent().attr('constraint_id'));
+  return false;
+}
+
 function remove_constraint_rule(id) {
   fade_in_out($("[rule_id='"+id+"']").parent());
   ajax_wrapper({
@@ -1758,7 +1792,7 @@ function remove_acl_item(id,item) {
 function update_cluster_settings() {
   $("#cluster_properties button").prop("disabled", true);
   var data = {
-    'hidden[hidden_input]': null // this is needed for backward compatibility 
+    'hidden[hidden_input]': null // this is needed for backward compatibility
   };
   $.each(Pcs.settingsController.get("properties"), function(_, prop) {
     data[prop.get("form_name")] = prop.get("cur_val");
@@ -2007,7 +2041,15 @@ function get_list_view_element_id(element) {
 }
 
 function auto_show_hide_constraints() {
-  var cont = ["location_constraints", "ordering_constraints", "ordering_set_constraints", "colocation_constraints", "meta_attributes"];
+  var cont = [
+    "location_constraints",
+    "ordering_constraints",
+    "ordering_set_constraints",
+    "colocation_constraints",
+    "ticket_constraints",
+    "ticket_set_constraints",
+    "meta_attributes",
+  ];
   $.each(cont, function(index, name) {
     var elem = $("#" + name)[0];
     var cur_resource = Pcs.resourcesContainer.get('cur_resource');
@@ -2214,10 +2256,10 @@ function resource_change_group(resource_id, group_id) {
     resource_id: resource_id,
     group_id: group_id
   };
-  
+
   if (resource_obj.get('parent')) {
     if (resource_obj.get('parent').get('id') == group_id) {
-      return;  
+      return;
     }
     if (resource_obj.get('parent').get('class_type') == 'group') {
       data['old_group_id'] = resource_obj.get('parent').get('id');
@@ -2709,7 +2751,7 @@ Ember.Handlebars.helper('selector-helper', function (content, value, place_holde
   var out = "";
   var line;
   if (place_holder) {
-    out += '<option value="">' + place_holder + '</option>'; 
+    out += '<option value="">' + place_holder + '</option>';
   }
   $.each(content, function(_, opt){
     line = '<option value="' + opt["value"] + '"';
@@ -2722,6 +2764,188 @@ Ember.Handlebars.helper('selector-helper', function (content, value, place_holde
   return new Handlebars.SafeString(out);
 });
 
+Ember.Handlebars.helper('bool-to-icon', function(value, options) {
+  var out = '<span class="sprites inverted ';
+  if (typeof(value) == 'undefined' || value == null) {
+    out += "questionmarkdark";
+  } else if (value) {
+    out += "checkdark"
+  } else {
+    out += "Xdark"
+  }
+  return new Handlebars.SafeString(out + '"> </span>');
+});
+
 function nl2br(text) {
   return text.replace(/(?:\r\n|\r|\n)/g, '<br />');
 }
+
+function enable_sbd(dialog) {
+  ajax_wrapper({
+    type: 'POST',
+    url: get_cluster_remote_url() + "remote_enable_sbd",
+    data: dialog.find("#enable_sbd_form").serialize(),
+    timeout: pcs_timeout,
+    success: function() {
+      dialog.parent().find("#enable_sbd_btn").button(
+        "option", "disabled", false
+      );
+      dialog.dialog("close");
+      alert(
+        'SBD enabled! You have to restart the cluster in order to apply changes.'
+      );
+      Pcs.update();
+    },
+    error: function (xhr, status, error) {
+      dialog.parent().find("#enable_sbd_btn").button(
+        "option", "disabled", false
+      );
+      xhr.responseText = xhr.responseText.replace(
+        "--skip-offline", "option 'ignore offline nodes'"
+      );
+      alert(
+        ajax_simple_error(xhr, status, error)
+      );
+    }
+  });
+}
+
+function enable_sbd_dialog(node_list) {
+  var buttonsOpts = [
+    {
+      text: "Enable SBD",
+      id: "enable_sbd_btn",
+      click: function() {
+        var dialog = $(this);
+        dialog.parent().find("#enable_sbd_btn").button(
+          "option", "disabled", true
+        );
+        enable_sbd(dialog);
+      }
+    },
+    {
+      text:"Cancel",
+      click: function () {
+        $(this).dialog("close");
+      }
+    }
+  ];
+
+  var dialog_obj = $("#enable_sbd_dialog").dialog({title: 'Enable SBD',
+    modal: true, resizable: false,
+    width: 'auto',
+    buttons: buttonsOpts
+  });
+
+  dialog_obj.keypress(function(e) {
+    if (
+      e.keyCode == $.ui.keyCode.ENTER &&
+      !dialog_obj.parent().find("#enable_sbd_btn").button("option", "disabled")
+    ) {
+      dialog_obj.parent().find("#enable_sbd_btn").trigger("click");
+      return false;
+    }
+  });
+  dialog_obj.find('#watchdog_table').empty();
+  $.each(node_list, function(_, node) {
+    dialog_obj.find("#watchdog_table").append(
+      '<tr>' +
+        '<td>' +
+          node + ':' +
+        '</td>' +
+        '<td>' +
+          '<input ' +
+            'type="text" ' +
+            'placeholder="/dev/watchdog" ' +
+            'name="watchdog[' + node + ']" ' +
+          '/>' +
+        '</td>' +
+      '</tr>'
+    );
+  });
+}
+
+function disable_sbd(dialog) {
+  ajax_wrapper({
+    type: 'POST',
+    url: get_cluster_remote_url() + "remote_disable_sbd",
+    data: dialog.find("#disable_sbd_form").serialize(),
+    timeout: pcs_timeout,
+    success: function() {
+      dialog.parent().find("#disable_sbd_btn").button(
+        "option", "disabled", false
+      );
+      dialog.dialog("close");
+      alert(
+        'SBD disabled! You have to restart the cluster in order to apply changes.'
+      );
+      Pcs.update();
+    },
+    error: function (xhr, status, error) {
+      dialog.parent().find("#disable_sbd_btn").button(
+        "option", "disabled", false
+      );
+      xhr.responseText = xhr.responseText.replace(
+        "--skip-offline", "option 'ignore offline nodes'"
+      );
+      alert(ajax_simple_error(xhr, status, error));
+    }
+  });
+}
+
+function disable_sbd_dialog() {
+  var buttonsOpts = [
+    {
+      text: "Disable SBD",
+      id: "disable_sbd_btn",
+      click: function() {
+        var dialog = $(this);
+        dialog.parent().find("#disable_sbd_btn").button(
+          "option", "disabled", true
+        );
+        disable_sbd(dialog);
+      }
+    },
+    {
+      text:"Cancel",
+      click: function () {
+        $(this).dialog("close");
+      }
+    }
+  ];
+
+  $("#disable_sbd_dialog").dialog({
+    title: 'Disable SBD',
+    modal: true, resizable: false,
+    width: 'auto',
+    buttons: buttonsOpts
+  });
+}
+
+function sbd_status_dialog() {
+  var buttonsOpts = [
+    {
+      text: "Enable SBD",
+      click: function() {
+        enable_sbd_dialog(Pcs.nodesController.get_node_name_list());
+      }
+    },
+    {
+      text: "Disable SBD",
+      click: disable_sbd_dialog
+    },
+    {
+      text:"Close",
+      click: function () {
+        $(this).dialog("close");
+      }
+    }
+  ];
+
+  $("#sbd_status_dialog").dialog({
+    title: 'SBD',
+    modal: true, resizable: false,
+    width: 'auto',
+    buttons: buttonsOpts
+  });
+}
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index 443c6ed..f002d5b 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -62,7 +62,16 @@ def remote(params, request, auth_user)
       :get_tokens => method(:get_tokens),
       :get_cluster_tokens => method(:get_cluster_tokens),
       :save_tokens => method(:save_tokens),
-      :get_cluster_properties_definition => method(:get_cluster_properties_definition)
+      :get_cluster_properties_definition => method(:get_cluster_properties_definition),
+      :check_sbd => method(:check_sbd),
+      :set_sbd_config => method(:set_sbd_config),
+      :get_sbd_config => method(:get_sbd_config),
+      :sbd_disable => method(:sbd_disable),
+      :sbd_enable => method(:sbd_enable),
+      :remove_stonith_watchdog_timeout => method(:remove_stonith_watchdog_timeout),
+      :set_stonith_watchdog_timeout_to_zero => method(:set_stonith_watchdog_timeout_to_zero),
+      :remote_enable_sbd => method(:remote_enable_sbd),
+      :remote_disable_sbd => method(:remote_disable_sbd)
   }
   remote_cmd_with_pacemaker = {
       :pacemaker_node_status => method(:remote_pacemaker_node_status),
@@ -100,19 +109,22 @@ def remote(params, request, auth_user)
   }
 
   command = params[:command].to_sym
-
-  if remote_cmd_without_pacemaker.include? command
-    return remote_cmd_without_pacemaker[command].call(
-      params, request, auth_user
-    )
-  elsif remote_cmd_with_pacemaker.include? command
-    if pacemaker_running?
-      return remote_cmd_with_pacemaker[command].call(params, request, auth_user)
+  begin
+    if remote_cmd_without_pacemaker.include? command
+      return remote_cmd_without_pacemaker[command].call(
+        params, request, auth_user
+      )
+    elsif remote_cmd_with_pacemaker.include? command
+      if pacemaker_running?
+        return remote_cmd_with_pacemaker[command].call(params, request, auth_user)
+      else
+        return [200,'{"pacemaker_not_running":true}']
+      end
     else
-      return [200,'{"pacemaker_not_running":true}']
+      return [404, "Unknown Request"]
     end
-  else
-    return [404, "Unknown Request"]
+  rescue NotImplementedException => e
+    return [501, "#{e}"]
   end
 end
 
@@ -766,7 +778,9 @@ def remote_add_node(params, request, auth_user, all=false)
     if params[:new_ring1addr] != nil
       node += ',' + params[:new_ring1addr]
     end
-    retval, output = add_node(auth_user, node, all, auto_start)
+    retval, output = add_node(
+      auth_user, node, all, auto_start, params[:watchdog]
+    )
   end
 
   if retval == 0
@@ -814,7 +828,11 @@ def remote_remove_nodes(params, request, auth_user)
   if config.get_nodes($cluster_name) == nil or config.get_nodes($cluster_name).length == 0
     return [200,"No More Nodes"]
   end
-  return out
+  if retval != 0
+    return [400, out]
+  else
+    return [200, out]
+  end
 end
 
 def remote_remove_node(params, request, auth_user)
@@ -958,7 +976,7 @@ def node_status(params, request, auth_user)
 
   if not_authorized_nodes.length > 0
     node.warning_list << {
-      :message => 'Not authorized against node(s) ' + 
+      :message => 'Not authorized against node(s) ' +
         not_authorized_nodes.join(', '),
       :type => 'nodes_not_authorized',
       :node_list => not_authorized_nodes,
@@ -1504,7 +1522,7 @@ def remove_resource(params, request, auth_user)
       command << '--force' if force
       out, errout, retval = run_cmd(auth_user, *command)
       if retval != 0
-        unless out.index(" does not exist.") != -1 and no_error_if_not_exists  
+        unless out.index(" does not exist.") != -1 and no_error_if_not_exists
           errors += errout.join(' ').strip + "\n"
         end
       end
@@ -1694,6 +1712,12 @@ def add_constraint_remote(params, request, auth_user)
       auth_user,
       resA, resB, score, params["force"], !params['disable_autocorrect']
     )
+  when "ticket"
+    retval, error = add_ticket_constraint(
+      auth_user,
+      params["ticket"], params["res_id"], params["role"], params["loss-policy"],
+      params["force"], !params['disable_autocorrect']
+    )
   else
     return [400, "Unknown constraint type: #{params['c_type']}"]
   end
@@ -1736,6 +1760,18 @@ def add_constraint_set_remote(params, request, auth_user)
       auth_user,
       params["resources"].values, params["force"], !params['disable_autocorrect']
     )
+  when "ticket"
+    unless params["options"]["ticket"]
+      return [400, "Error adding constraint ticket: option ticket missing"]
+    end
+    retval, error = add_ticket_set_constraint(
+      auth_user,
+      params["options"]["ticket"],
+      (params["options"]["loss-policy"] or ""),
+      params["resources"].values,
+      params["force"],
+      !params['disable_autocorrect']
+    )
   else
     return [400, "Unknown constraint type: #{params["c_type"]}"]
   end
@@ -1997,7 +2033,7 @@ def resource_ungroup(params, request, auth_user)
   unless params[:group_id]
     return [400, 'group_id has to be specified.']
   end
-  
+
   _, stderr, retval = run_cmd(
     auth_user, PCS, 'resource', 'ungroup', params[:group_id]
   )
@@ -2017,7 +2053,7 @@ def resource_clone(params, request, auth_user)
   unless params[:resource_id]
     return [400, 'resource_id has to be specified.']
   end
-  
+
   _, stderr, retval = run_cmd(
     auth_user, PCS, 'resource', 'clone', params[:resource_id]
   )
@@ -2154,3 +2190,190 @@ def get_fence_agent_metadata(params, request, auth_user)
   end
   return [200, stdout.join("\n")]
 end
+
+def check_sbd(param, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::READ)
+    return 403, 'Permission denied'
+  end
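+  # response shape:
+  #   {"sbd": {"installed": bool, "enabled": bool, "running": bool},
+  #    "watchdog": {"path": "/dev/watchdog", "exist": bool}}
+  # the "watchdog" key is present only when a watchdog path was supplied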
+  out = {
+    :sbd => {
+      :installed => is_service_installed?('sbd'),
+      :enabled => is_service_enabled?('sbd'),
+      :running => is_service_running?('sbd')
+    }
+  }
+  watchdog = param[:watchdog]
+  if watchdog
+    out[:watchdog] = {
+      :path => watchdog,
+      :exist => File.exist?(watchdog)
+    }
+  end
+  return [200, JSON.generate(out)]
+end
+
+def set_sbd_config(param, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  config = param[:config]
+  unless config
+    return [400, 'Parameter "config" required']
+  end
+
+  file = nil
+  begin
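+    # note: mode 'w' truncates the file on open, before the lock is taken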
+    file = File.open(SBD_CONFIG, 'w')
+    file.flock(File::LOCK_EX)
+    file.write(config)
+  rescue => e
+    msg = "Unable to save SBD configuration: #{e}"
+    $logger.error(msg)
+    return [400, msg]
+  ensure
+    if file
+      file.flock(File::LOCK_UN)
+      file.close()
+    end
+  end
+  msg = 'SBD configuration saved.'
+  $logger.info(msg)
+  return [200, msg]
+end
+
+def get_sbd_config(param, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::READ)
+    return 403, 'Permission denied'
+  end
+  out = []
+  file = nil
+  begin
+    file = File.open(SBD_CONFIG, 'r')
+    file.flock(File::LOCK_SH)
+    out = file.readlines()
+  rescue => e
+    msg = "Unable to get SBD configuration: #{e}"
+    $logger.error(msg)
+    return [400, msg]
+  ensure
+    if file
+      file.flock(File::LOCK_UN)
+      file.close()
+    end
+  end
+  return [200, out.join('')]
+end
+
+def sbd_disable(param, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  if disable_service('sbd')
+    msg = 'SBD disabled'
+    $logger.info(msg)
+    return [200, msg]
+  else
+    msg = 'Disabling SBD failed'
+    $logger.error(msg)
+    return [400, msg]
+  end
+end
+
+def sbd_enable(param, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  if enable_service('sbd')
+    msg = 'SBD enabled'
+    $logger.info(msg)
+    return [200, msg]
+  else
+    msg = 'Enabling SBD failed'
+    $logger.error(msg)
+    return [400, msg]
+  end
+end
+
+def remove_stonith_watchdog_timeout(param, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  if set_cluster_prop_force(auth_user, 'stonith-watchdog-timeout', '')
+    $logger.info('Cluster property "stonith-watchdog-timeout" removed')
+    return [200, 'OK']
+  else
+    $logger.info('Failed to remove cluster property "stonith-watchdog-timeout"')
+    return [400, 'ERROR']
+  end
+end
+
+def set_stonith_watchdog_timeout_to_zero(param, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  if set_cluster_prop_force(auth_user, 'stonith-watchdog-timeout', '0')
+    $logger.info('Cluster property "stonith-watchdog-timeout" set to "0"')
+    return [200, 'OK']
+  else
+    $logger.info(
+      'Failed to set cluster property "stonith-watchdog-timeout" to 0'
+    )
+    return [400, 'ERROR']
+  end
+end
+
+def remote_enable_sbd(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+
+  arg_list = []
+
+  if ['true', '1', 'on'].include?(params[:ignore_offline_nodes])
+    arg_list << '--skip-offline'
+  end
+
+  params[:watchdog].each do |node, watchdog|
+    unless watchdog.strip.empty?
+      arg_list << "--watchdog=#{watchdog.strip}@#{node}"
+    end
+  end
+
+  params[:config].each do |option, value|
+    unless value.empty?
+      arg_list << "#{option}=#{value}"
+    end
+  end
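+
+  # the resulting call looks like (illustrative values):
+  #   pcs stonith sbd enable --skip-offline \
+  #     --watchdog=/dev/watchdog@node1 SBD_WATCHDOG_TIMEOUT=5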
+
+  _, stderr, retcode = run_cmd(
+    auth_user, PCS, 'stonith', 'sbd', 'enable', *arg_list
+  )
+
+  if retcode != 0
+    return [400, "Unable to enable sbd in cluster:\n#{stderr.join('')}"]
+  end
+
+  return [200, 'SBD has been enabled.']
+end
+
+def remote_disable_sbd(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+
+  arg_list = []
+
+  if ['true', '1', 'on'].include?(params[:ignore_offline_nodes])
+    arg_list << '--skip-offline'
+  end
+
+  _, stderr, retcode = run_cmd(
+    auth_user, PCS, 'stonith', 'sbd', 'disable', *arg_list
+  )
+
+  if retcode != 0
+    return [400, "Unable to disable sbd in cluster:\n#{stderr.join('')}"]
+  end
+
+  return [200, 'SBD has been disabled.']
+end
diff --git a/pcsd/settings.rb b/pcsd/settings.rb
index ff056a4..6229161 100644
--- a/pcsd/settings.rb
+++ b/pcsd/settings.rb
@@ -18,6 +18,8 @@ COROSYNC_BINARIES = "/usr/sbin/"
 CMAN_TOOL = "/usr/sbin/cman_tool"
 PACEMAKERD = "/usr/sbin/pacemakerd"
 CIBADMIN = "/usr/sbin/cibadmin"
+SBD_CONFIG = '/etc/sysconfig/sbd'
+CIB_PATH = '/var/lib/pacemaker/cib/cib.xml'
 
 SUPERUSER = 'hacluster'
 ADMIN_GROUP = 'haclient'
diff --git a/pcsd/settings.rb.debian b/pcsd/settings.rb.debian
index 5fb28da..7bc92a9 100644
--- a/pcsd/settings.rb.debian
+++ b/pcsd/settings.rb.debian
@@ -18,6 +18,8 @@ COROSYNC_BINARIES = "/usr/sbin/"
 CMAN_TOOL = "/usr/sbin/cman_tool"
 PACEMAKERD = "/usr/sbin/pacemakerd"
 CIBADMIN = "/usr/sbin/cibadmin"
+SBD_CONFIG = '/etc/sysconfig/sbd'
+CIB_PATH = '/var/lib/pacemaker/cib/cib.xml'
 
 SUPERUSER = 'hacluster'
 ADMIN_GROUP = 'haclient'
diff --git a/pcsd/views/_dialogs.erb b/pcsd/views/_dialogs.erb
index 8bfa5c6..46e7fdb 100644
--- a/pcsd/views/_dialogs.erb
+++ b/pcsd/views/_dialogs.erb
@@ -55,3 +55,163 @@
   <p style="font-size:12px;">Are you sure you want to remove the following ACL role(s)?</p>
   <span class="name_list"></span>
 </div>
+
+<div id="enable_sbd_dialog" style="display:none;">
+  <form id="enable_sbd_form">
+    <table>
+      <tr>
+        <td class="darkdatatable_header">
+          Specify watchdog devices for nodes:
+        </td>
+      </tr>
+      <tr>
+        <td>
+          <table id="watchdog_table">
+          </table>
+        </td>
+      </tr>
+    </table>
+    <table style="margin-top: 20px;">
+      <tr>
+        <td class="darkdatatable_header">
+          SBD options:
+        </td>
+      </tr>
+      <tr>
+        <td>
+          <table id="sbd_options">
+            <tr>
+              <td>SBD_DELAY_START</td>
+              <td>
+                <select name="config[SBD_DELAY_START]">
+                  <option value="" selected>(default)</option>
+                  <option value="yes">yes</option>
+                  <option value="no">no</option>
+                </select>
+              </td>
+            </tr>
+            <tr>
+              <td>SBD_STARTMODE</td>
+              <td>
+                <select name="config[SBD_STARTMODE]">
+                  <option value="" selected>(default)</option>
+                  <option value="clean">clean</option>
+                  <option value="always">always</option>
+                </select>
+              </td>
+            </tr>
+            <tr>
+              <td>SBD_WATCHDOG_TIMEOUT</td>
+              <td>
+                <input
+                    type="text"
+                    name="config[SBD_WATCHDOG_TIMEOUT]"
+                    placeholder="5"
+                />
+              </td>
+            </tr>
+          </table>
+        </td>
+      </tr>
+    </table>
+    <table>
+      <tr>
+        <td>
+          <label for="ignore_offline_nodes_enable_sbd">
+            ignore offline nodes
+          </label>
+        </td>
+        <td>
+          <input
+              id="ignore_offline_nodes_enable_sbd"
+              type="checkbox"
+              name="ignore_offline_nodes"
+          />
+        </td>
+      </tr>
+    </table>
+  </form>
+</div>
+
+<div id="disable_sbd_dialog" style="display:none;">
+  <form id="disable_sbd_form">
+    Are you sure you want to disable SBD?
+    <table>
+      <tr>
+        <td>
+          <label for="ignore_offline_nodes_disable_sbd">
+            ignore offline nodes
+          </label>
+        </td>
+        <td>
+          <input
+              id="ignore_offline_nodes_disable_sbd"
+              type="checkbox"
+              name="ignore_offline_nodes"
+          />
+        </td>
+      </tr>
+    </table>
+  </form>
+</div>
+
+<div id="sbd_status_dialog" style="display:none;">
+  <table>
+    <tr>
+      <td class="darkdatatable_header">SBD service status</td>
+    </tr>
+    <tr>
+      <td>
+        <table class="darkdatatable">
+          <tr>
+            <th>NODE</th>
+            <th>INSTALLED</th>
+            <th>ENABLED</th>
+            <th>RUNNING</th>
+          </tr>
+          {{#each node in Pcs.nodesController}}
+          <tr>
+            <td>{{node.name}}</td>
+            <td>{{bool-to-icon node.is_sbd_installed}}</td>
+            <td>{{bool-to-icon node.is_sbd_enabled}}</td>
+            <td>{{bool-to-icon node.is_sbd_running}}</td>
+          </tr>
+          {{/each}}
+        </table>
+      </td>
+    </tr>
+  </table>
+  {{#if Pcs.is_sbd_enabled_or_running}}
+  <table style="margin-top: 20px;">
+    <tr>
+      <td class="darkdatatable_header">SBD configuration</td>
+    </tr>
+    <tr>
+      <td>
+        {{{Pcs.sbd_config_table}}}
+      </td>
+    </tr>
+  </table>
+  <table style="margin-top: 20px;">
+    <tr>
+      <td class="darkdatatable_header">SBD watchdogs</td>
+    </tr>
+    <tr>
+      <td>
+        <table class="darkdatatable">
+          <tr>
+            <th>NODE</th>
+            <th>WATCHDOG</th>
+          </tr>
+          {{#each node in Pcs.nodesController}}
+          <tr>
+            <td>{{node.name}}</td>
+            <td>{{node.sbd_watchdog}}</td>
+          </tr>
+          {{/each}}
+        </table>
+      </td>
+    </tr>
+  </table>
+  {{/if}}
+</div>
diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb
index 4e2311c..a337160 100644
--- a/pcsd/views/_resource.erb
+++ b/pcsd/views/_resource.erb
@@ -14,13 +14,22 @@
       Remove</a>    </div>
   <div class="plus sprites"></div><div class="link"> 
     <% if @myView == "resource" %>
-      <a href="#" onclick="$('#add_resource').dialog({title: 'Add Resource', modal:true, width: 'auto'});return false;">
+      <a href="#" onclick="$('#new_resource_agent').dialog({title: 'Add Resource', modal:true, width: 'auto'});return false;">
     <% else %>
-      <a href="#" onclick="$('#add_stonith').dialog({title: 'Add Fence Device', modal:true, width: 'auto'});return false;">
+      <a href="#" onclick="$('#new_stonith_agent').dialog({title: 'Add Fence Device', modal:true, width: 'auto'});return false;">
     <% end %>
   Add</a>    </div>
     <% if @myView == "resource" %>
       <div class="plus sprites"></div><div class="link"> <a href="#" onclick="create_group();return false;">Create Group</a></div>
+    <% else %>
+        <div class="configurelight sprites"></div>
+        <div class="link">  
+          {{#if Pcs.is_sbd_supported}}
+            <a href="#" onclick="sbd_status_dialog();return false;">SBD</a>
+          {{else}}
+            <a href="#" onclick="alert('This cluster is running an old version of PCSD which does not support SBD.'); return false;">SBD</a>
+          {{/if}}
+        </div>
     <% end %>
     </td>
     <td class="borderbottom"> </td>
@@ -45,7 +54,7 @@
     <% if @myView == "resource" %>
       </div>
     </table>
-    <div id="add_resource" style="display: none;">
+    <div id="new_resource_agent" style="display: none;">
       <table id="resource_selector" style="clear:left;float:left;margin-top:25px;">
 	<tr>
 	  <td class="bold">Class/Provider</td>
@@ -83,9 +92,13 @@
 	  </td>
 	</tr>
       </table>
-      {{resource-form agent=Pcs.resourcesContainer.new_resource_agent_metadata groups=Pcs.resourcesContainer.groups_enum}}
+      {{resource-form
+          agent=Pcs.resourcesContainer.new_resource_agent_metadata
+          groups=Pcs.resourcesContainer.groups_enum
+          table_id_suffix="_new"
+      }}
     </div>
-    <div id="add_stonith" style="display: none;">
+    <div id="new_stonith_agent" style="display: none;">
       <table id="stonith_selector" style="clear:left;float:left;margin-top:25px;">
 	<tr>
 	  <td class="bold">Type</td>
@@ -98,7 +111,10 @@
 	  </td>
 	</tr>
       </table>
-        {{fence-form agent=Pcs.resourcesContainer.new_fence_agent_metadata}}
+      {{fence-form
+          agent=Pcs.resourcesContainer.new_fence_agent_metadata
+          table_id_suffix="_new"
+      }}
     </div>
     <div id="add_group" style="display: none;">
       <form method=POST onkeypress="if (event.keyCode == 13) {$(this).parent().parent().find('.ui-dialog-buttonpane button:eq(1)').trigger('click');return false;} " action="/resource_group_add">
diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
index 55b02c2..b14c327 100644
--- a/pcsd/views/main.erb
+++ b/pcsd/views/main.erb
@@ -28,7 +28,7 @@
     });
     var origMainTRHeight = 0;
     function resizeDialog() {
-      $('#add_resource').dialog('option','position','center');
+      $('#new_resource_agent').dialog('option','position','center');
     }
     function resizeTable(refresh) {
       if (refresh == true) {
@@ -194,6 +194,7 @@
           <td class="reg" nowrap>{{resource.res_type}}</td>
         </tr>
         {{#if resource.is_primitive}}
+        {{#if resource.resource_agent.shortdesc}}
           <tr>
             <td>
               <div class="bold">Description:</div>
@@ -212,6 +213,7 @@
             </td>
           </tr>
         {{/if}}
+        {{/if}}
         {{#unless resource.stonith}}
         {{#if resource.is_primitive}}
           <tr>
@@ -289,6 +291,10 @@
         {{ordering_constraints-table constraints=resource.ordering_constraints resource_id=resource._id}}
         {{ordering_set_constraints-table constraints=resource.ordering_set_constraints}}
         {{colocation_constraints-table constraints=resource.colocation_constraints}}
+        {{#if Pcs.is_ticket_constraints_supported}}
+          {{ticket_constraints-table constraints=resource.ticket_constraints resource_id=resource._id}}
+          {{ticket_set_constraints-table constraints=resource.ticket_set_constraints}}
+        {{/if}}
         {{meta_attributes-table resource=resource}}
         {{#if utilization_support}}
           {{#if resource.is_primitive}}
@@ -299,13 +305,21 @@
       {{/unless}}
     </div>
     {{#if stonith}}
-      <div style="clear:left; margin-top: 2em;" id="stonith_agent_form">
-        {{fence-form resource=resource agent=resource.resource_agent}}
+      <div style="clear:left; margin-top: 2em;" id="stonith_info_div">
+        {{fence-form
+            resource=resource
+            agent=resource.resource_agent
+            table_id_suffix="_existing"
+        }}
       </div>
     {{else}}
     {{#if resource.is_primitive}}
-      <div style="clear:left; margin-top: 2em;" id="resource_agent_form">
-        {{resource-form resource=resource agent=resource.resource_agent}}
+      <div style="clear:left; margin-top: 2em;" id="resource_info_div">
+        {{resource-form
+            resource=resource
+            agent=resource.resource_agent
+            table_id_suffix="_existing"
+        }}
       </div>
     {{/if}}
     {{/if}}
@@ -340,7 +354,7 @@
 			  <td>{{cons.score}}</td>
 			  <td {{bind-attr constraint_id=cons.id}} style="text-align:center">
 			    {{#unless cons.temp}}
-				<a onclick="remove_constraint($(this).parent().attr('constraint_id'));return false;" href="#" class="remove">X</a>
+				<a onclick="return remove_constraint_action(this);" href="#" class="remove">X</a>
 			    {{/unless}}
 			  </td>
 			{{/if}}
@@ -380,7 +394,7 @@
 			    {{/if}}
 			    <td>{{cons.score}}</td>
 			    <td {{bind-attr constraint_id=cons.id}} style="text-align:center">
-				  <a onclick="remove_constraint($(this).parent().attr('constraint_id'));return false;" href="#" class="remove">X</a>
+				  <a onclick="return remove_constraint_action(this);" href="#" class="remove">X</a>
 			    </td>
 			  </tr>
 			{{else}}
@@ -435,7 +449,7 @@
                 <tr>
                   <td>{{cons.id}}</td>
                   <td {{bind-attr constraint_id="cons.id"}} style="text-align:center;">
-                    <a onclick="remove_constraint($(this).parent().attr('constraint_id')); return false;" href="#" class="remove">X</a>
+                    <a onclick="return remove_constraint_action(this);" href="#" class="remove">X</a>
                   </td>
                 </tr>
                 {{#each set in cons.sets}}
@@ -462,6 +476,189 @@ Use the 'Add' button to submit the form.">
         </table>
   </script>
 
+  <script
+    type="text/x-handlebars"
+    data-template-name="components/ticket_constraints-table"
+  >
+    <table style="clear:left;float:left;">
+      <tr>
+        <td
+          style="display: block;"
+          onclick="show_hide_constraints(this)"
+          class="datatable_header hover-pointer"
+          id="ticket_constraints"
+        >
+          <span style="" class="downarrow sprites"></span>
+          <span style="display: none;" class="rightarrow sprites"></span>
+          Resource Ticket Preferences
+          ({{#if constraints.length}}{{constraints.length}}{{else}}0{{/if}})
+        </td>
+      </tr>
+      <tr>
+        <td>
+          <div id="locationdep">
+            <table class="datatable">
+              <tr>
+                <th>Ticket</th>
+                <th>Resource Role</th>
+                <th>Loss Policy</th>
+                <th>Remove</th>
+              </tr>
+              {{#each cons in constraints}}
+                <tr>
+                  <td>{{cons.ticket}}</td>
+                  <td>{{cons.rsc-role}}</td>
+                  <td>{{cons.loss-policy}}</td>
+                  <td
+                    {{bind-attr constraint_id=cons.id}}
+                    style="text-align:center"
+                  >
+                    <a
+                      onclick="return remove_constraint_action(this);"
+                      href="#"
+                      class="remove"
+                    >X</a>
+                  </td>
+                </tr>
+              {{else}}
+                <tr>
+                  <td colspan="4" style="color: gray;">NONE</td>
+                </tr>
+              {{/each}}
+              <tr id="new_ticket_constraint">
+                <td class="constraint-ticket-add-attribute">
+                  <input type="text" name="ticket"/>
+                </td>
+                <td class="constraint-ticket-add-attribute">
+                  <select name="role"/>
+                    <option value=""></option>
+                    <option value="Stopped">Stopped</option>
+                    <option value="Started">Started</option>
+                    <option value="Master">Master</option>
+                    <option value="Slave">Slave</option>
+                  </selct>
+                </td>
+                <td class="constraint-ticket-add-attribute">
+                  <select name="loss-policy"/>
+                    <option value=""></option>
+                    <option value="fence">fence</option>
+                    <option value="stop">stop</option>
+                    <option value="freeze">freeze</option>
+                    <option value="demote">demote</option>
+                  </selct>
+                </td>
+                <td>
+                  <button type="button" name="add"
+                    onclick="add_constraint(
+                      '#new_ticket_constraint',
+                      'ticket',
+                      false
+                    );"
+                  >Add</button>
+                </td>
+              </tr>
+            </table>
+          </div>
+        </td>
+      </tr>
+    </table>
+  </script>
+
+  <script
+    type="text/x-handlebars"
+    data-template-name="components/ticket_set_constraints-table"
+  >
+    <table style="clear:left;float:left;">
+      <tr>
+        <td
+          id="ticket_set_constraints"
+          class="datatable_header hover-pointer"
+          style="display: block;"
+          onclick="show_hide_constraints(this)"
+        >
+          <span style="" class="downarrow sprites"></span>
+          <span style="display: none;" class="rightarrow sprites"></span>
+          Resource Ticket Set Preferences
+          ({{#if constraints.length}}{{constraints.length}}{{else}}0{{/if}})
+        </td>
+      </tr>
+      <tr>
+        <td>
+          <div id="locationdep">
+            <table class="datatable">
+              <tr>
+                <th>Preference Name/Set of Resources</th>
+                <th>Ticket</th>
+                <th>Loss Policy</th>
+                <th style="text-align: center;">Remove</th>
+              </tr>
+              {{#each cons in constraints}}
+                <tr>
+                  <td>{{cons.id}}</td>
+                  <td>{{cons.ticket}}</td>
+                  <td>{{cons.loss-policy}}</td>
+                  <td
+                    {{bind-attr constraint_id="cons.id"}}
+                    style="text-align:center;"
+                  >
+                    <a
+                      onclick="return remove_constraint_action(this);"
+                      href="#"
+                      class="remove"
+                    >X</a>
+                  </td>
+                </tr>
+                {{#each set in cons.sets}}
+                <tr>
+                  <td style="padding-left:2em;" colspan="4">
+                    Set:{{#each rsc in set.resources}} {{rsc}}{{/each}}
+                  </td>
+                </tr>
+                {{/each}}
+              {{else}}
+                <tr><td style="color: gray;" colspan="4">NONE</td></tr>
+              {{/each}}
+              <tr id="new_ticket_constraint_resource_set" title="
+                Enter the resources you want to be in one set into the 'Set'
+                field separated by space.
+                Use the 'New Set' button to create more sets.
+                Use the 'Add' button to submit the form.
+              ">
+                <td>Set: <input type="text" name="resource_ids[]"></td>
+                <td class="constraint-ticket-add-attribute">
+                  <input type="text" name="ticket"/>
+                </td>
+                <td class="constraint-ticket-add-attribute">
+                  <select name="loss-policy"/>
+                    <option value=""></option>
+                    <option value="fence">fence</option>
+                    <option value="stop">stop</option>
+                    <option value="freeze">freeze</option>
+                    <option value="demote">demote</option>
+                  </selct>
+                </td>
+                <td style="vertical-align: bottom;">
+                  <button type="button" name="new-row"
+                    onclick="new_constraint_set_row(
+                      '#new_ticket_constraint_resource_set'
+                    );"
+                  >New Set</button>
+                  <button type="button" name="add"
+                    onclick="add_constraint_set(
+                      '#new_ticket_constraint_resource_set',
+                      'ticket',
+                      false
+                    );"
+                  >Add</button>
+                </td>
+              </tr>
+            </table>
+          </div>
+        </td>
+      </tr>
+    </table>
+  </script>
+
   <script type="text/x-handlebars" data-template-name="components/colocation_constraints-table">
     <table style="clear:left;float:left">
 		<tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="colocation_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Colocation Preferences ({{#if constraints.length}}{{constraints.length}}{{else}}0{{/if}})</td></tr>
@@ -474,7 +671,7 @@ Use the 'Add' button to submit the form.">
 			    <td>{{cons.together}}</td>
 			    <td>{{cons.score}}</td>
 			    <td {{bind-attr constraint_id=cons.id}} style="text-align:center">
-				  <a onclick="remove_constraint($(this).parent().attr('constraint_id'));return false;" href="#" class="remove">X</a>
+				  <a onclick="return remove_constraint_action(this);" href="#" class="remove">X</a>
 			    </td>
 			  </tr>
 			  {{else}}
@@ -526,7 +723,11 @@ Use the 'Add' button to submit the form.">
   <script type="text/x-handlebars" data-template-name="components/utilization-table">
     <table style="clear:left; float:left;">
       <tr>
-        <td {{action toggleBody}} class="datatable_header hover-pointer">
+        <td
+            {{action toggleBody}}
+            id="utilization_attributes"
+            class="datatable_header hover-pointer"
+        >
           {{#if show_content}}
             <span class="downarrow sprites"></span>
           {{else}}
@@ -600,7 +801,12 @@ Use the 'Add' button to submit the form.">
     <table style="clear:left; margin-top: 1em;" class="args-table">
       {{#if show_title}}
         <tr>
-          <td {{action toggleBody}} {{bind-attr id=table_id}} colspan="2" nowrap>
+          <td
+              {{action toggleBody}}
+              {{bind-attr id=table_id_full}}
+              colspan="2"
+              nowrap
+          >
             {{#if show_content}}
               <span class="downarrow sprites"></span>
             {{else}}
@@ -706,11 +912,15 @@ Use the 'Add' button to submit the form.">
           </div>
         {{/unless}}
         {{parameters-table
+            table_id="required_args"
+            table_id_suffix=table_id_suffix
             table_name="Required Arguments"
             parameters=agent.required_parameters
             show_content=true
         }}
         {{parameters-table
+            table_id="optional_args"
+            table_id_suffix=table_id_suffix
             table_name="Optional Arguments"
             parameters=agent.optional_parameters
         }}
@@ -738,6 +948,7 @@ Use the 'Add' button to submit the form.">
         {{#unless resource}}
           <table class="args-table">
             <tbody>
+              {{#if agent.shortdesc}}
               <tr>
                 <td>
                   <div class="bold">Description:</div>
@@ -755,6 +966,7 @@ Use the 'Add' button to submit the form.">
                   <div class="long_desc_div reg" style="display:none; font-size:12px; max-width:350px;">{{{agent.longdesc_html}}}</div>
                 </td>
               </tr>
+              {{/if}}
               <tr>
                 <td class="reg">
                   Fence Instance Name
@@ -767,15 +979,21 @@ Use the 'Add' button to submit the form.">
           </table>
         {{/unless}}
         {{parameters-table
+            table_id="required_fenceargs"
+            table_id_suffix=table_id_suffix
             table_name="Required Arguments"
             parameters=agent.required_parameters
             show_content=true
         }}
         {{parameters-table
+            table_id="optional_fenceargs"
+            table_id_suffix=table_id_suffix
             table_name="Optional Arguments"
             parameters=agent.optional_parameters
         }}
         {{parameters-table
+            table_id="advanced_fenceargs"
+            table_id_suffix=table_id_suffix
             table_name="Advanced Arguments"
             parameters=agent.advanced_parameters
         }}
@@ -827,7 +1045,7 @@ Use the 'Add' button to submit the form.">
       {{/if}}{{/if}}
     </td>
   </script>
-  
+
   <script type="text/x-handlebars">
 <div id="wrapper">
 
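The recurring hunks above replace each inline remove-link handler with a call
to a shared remove_constraint_action() helper. The helper's definition is not
part of this diff; a minimal sketch, assuming it simply factors out the old
inline logic, could look like this:

    // Hypothetical reconstruction -- the real definition lives elsewhere in
    // pcsd's JavaScript. It reproduces the inline handler it replaces: read
    // the constraint_id attribute set via bind-attr on the enclosing <td>,
    // delegate to remove_constraint(), and return false so the href="#"
    // navigation is cancelled.
    function remove_constraint_action(element) {
      remove_constraint($(element).parent().attr("constraint_id"));
      return false;
    }

Centralizing the handler keeps the templates free of jQuery traversal details
and gives a single place to adjust the lookup if the markup around the remove
link ever changes.
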
diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb
index a554bc8..478e0f6 100644
--- a/pcsd/views/nodes.erb
+++ b/pcsd/views/nodes.erb
@@ -185,6 +185,11 @@
                       {{/if}}
                       {{/if}}
                     </td></tr>
+                    {{#if Pcs.is_sbd_enabled_or_running }}
+                    <tr><td>sbd</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div>
+                      {{{Pcs.nodesController.cur_node.sbd_status_str}}}
+                    </td></tr>
+                    {{/if}}
                     <tr><td>pcsd</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div>
                       {{#if Pcs.nodesController.cur_node.pcsd}}
                       <span id="pcsd_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.pcsd_startup}})</span>
@@ -335,6 +340,14 @@
         <td>Auto Start/Enable:</td>
         <td><input type="checkbox" name="auto_start" value="1" checked></td>
       </tr>
+      {{#if Pcs.is_sbd_enabled}}
+      <tr>
+        <td>SBD watchdog:</td>
+        <td>
+          <input type="text" name="watchdog" placeholder="/dev/watchdog" />
+        </td>
+      </tr>
+      {{/if}}
       {{#if Pcs.is_cman_with_udpu_transport}}
       <tr>
         <td colspan="2" style="color: orange">This is a CMAN cluster with UDPU transport,<br>cluster restart is required to apply node addition.</td>
diff --git a/setup.py b/setup.py
index 58c159a..0e267e0 100644
--- a/setup.py
+++ b/setup.py
@@ -16,13 +16,19 @@ class CleanCommand(Command):
 
 setup(
     name='pcs',
-    version='0.9.151',
+    version='0.9.152',
     description='Pacemaker Configuration System',
     author='Chris Feist',
     author_email='cfeist at redhat.com',
-    url='http://github.com/feist/pcs',
-    packages=find_packages(exclude=["*.test", "*.test.*", "test.*", "test"]),
-    package_data={'pcs':['bash_completion.d.pcs','pcs.8']},
+    url='https://github.com/ClusterLabs/pcs',
+    packages=find_packages(),
+    package_data={'pcs':[
+        'bash_completion.sh',
+        'pcs.8',
+        'pcs',
+        'test/resources/*.xml',
+        'test/resources/*.conf',
+    ]},
     entry_points={
         'console_scripts': [
             'pcs = pcs.app:main',

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git