[Debian-ha-commits] [pcs] 01/05: New upstream version 0.9.162

Valentin Vidic vvidic-guest@moszumanska.debian.org
Mon Nov 27 11:04:10 UTC 2017


This is an automated email from the git hooks/post-receive script.

vvidic-guest pushed a commit to branch master
in repository pcs.

commit 719d5e9f0d05464e251b823feaab07c8f80a6946
Author: Valentin Vidic <Valentin.Vidic@CARNet.hr>
Date:   Sun Nov 19 09:16:52 2017 +0100

    New upstream version 0.9.162
---
 CHANGELOG.md                                       |   32 +-
 Makefile                                           |   50 +-
 README.md                                          |    1 +
 newversion.py                                      |   12 +-
 pcs/app.py                                         |    2 +-
 pcs/cli/common/capabilities.py                     |    4 +-
 pcs/cli/common/console_report.py                   |   85 +-
 pcs/cli/common/lib_wrapper.py                      |    3 +-
 pcs/cli/common/test/test_capabilities.py           |    2 +-
 pcs/cli/common/test/test_console_report.py         |  203 ++
 pcs/cluster.py                                     |    2 +-
 pcs/common/report_codes.py                         |    5 +-
 pcs/common/test/test_tools_xml_fromstring.py       |   97 +
 pcs/common/tools.py                                |    7 +-
 pcs/lib/booth/test/test_config_structure.py        |    5 +
 pcs/lib/cib/test/test_constraint.py                |    1 +
 pcs/lib/cib/test/test_constraint_colocation.py     |    1 +
 pcs/lib/cib/test/test_constraint_order.py          |    1 +
 pcs/lib/cib/test/test_constraint_ticket.py         |    5 +-
 pcs/lib/cib/test/test_resource_guest_node.py       |    1 +
 pcs/lib/cib/test/test_resource_operations.py       |    1 +
 pcs/lib/cib/test/test_resource_remote_node.py      |    1 +
 pcs/lib/cib/test/test_resource_set.py              |    3 +-
 pcs/lib/cib/test/test_tools.py                     |   10 +-
 pcs/lib/cib/tools.py                               |    9 +-
 pcs/lib/commands/quorum.py                         |   63 +-
 pcs/lib/commands/sbd.py                            |   10 +-
 .../commands/test/resource/test_bundle_create.py   |    8 +
 pcs/lib/corosync/config_facade.py                  |  310 ++-
 pcs/lib/corosync/qdevice_net.py                    |    7 +-
 pcs/lib/pacemaker/live.py                          |   40 +-
 pcs/lib/pacemaker/test/test_live.py                |  153 +-
 pcs/lib/reports.py                                 |   96 +-
 pcs/lib/test/test_env.py                           |    6 +-
 pcs/lib/test/test_resource_agent.py                |    6 +-
 pcs/lib/test/test_validate.py                      |   26 +
 pcs/lib/validate.py                                |    5 +-
 pcs/pcs                                            |    6 +-
 pcs/pcs.8                                          |   24 +-
 pcs/quorum.py                                      |   96 +-
 pcs/resource.py                                    |   28 +-
 pcs/settings_default.py                            |    4 +-
 pcs/snmp/Makefile                                  |   46 +
 pcs/snmp/__init__.py                               |    0
 pcs/snmp/agentx/__init__.py                        |    0
 pcs/snmp/agentx/pcs_pyagentx.py                    |   15 +
 pcs/snmp/agentx/types.py                           |   48 +
 pcs/snmp/agentx/updater.py                         |   98 +
 pcs/snmp/mibs/PCMK-PCS-MIB.txt                     |   16 +
 pcs/snmp/mibs/PCMK-PCS-V1-MIB.txt                  |  218 ++
 pcs/snmp/pcs_snmp_agent                            |   18 +
 pcs/snmp/pcs_snmp_agent.8                          |   29 +
 pcs/snmp/pcs_snmp_agent.conf                       |    7 +
 pcs/snmp/pcs_snmp_agent.logrotate                  |   10 +
 pcs/snmp/pcs_snmp_agent.py                         |   92 +
 pcs/snmp/pcs_snmp_agent.service                    |   12 +
 pcs/snmp/settings.py                               |   11 +
 pcs/snmp/updaters/__init__.py                      |    0
 pcs/snmp/updaters/v1.py                            |  209 ++
 pcs/status.py                                      |   10 +
 pcs/stonith.py                                     |    2 +
 pcs/test/curl_test.py                              |    2 +-
 .../corosync-3nodes-qdevice-heuristics.conf        |   44 +
 .../resources/qdevice-certs/final-certificate.pk12 |    1 +
 .../qdevice-certs/qdevice-cert-request.crq         |    1 +
 pcs/test/resources/qdevice-certs/qnetd-cacert.crt  |    2 +-
 .../resources/qdevice-certs/signed-certificate.crt |    1 +
 pcs/test/test_lib_commands_quorum.py               | 2169 ++++++++++++--------
 pcs/test/test_lib_commands_sbd.py                  |   15 +
 pcs/test/test_lib_corosync_config_facade.py        | 1203 +++++++++--
 pcs/test/test_lib_corosync_qdevice_net.py          |   11 +-
 pcs/test/test_quorum.py                            |  400 ++--
 pcs/test/tools/command_env/assistant.py            |   57 +-
 pcs/test/tools/command_env/config.py               |    2 +-
 pcs/test/tools/command_env/config_corosync_conf.py |    4 +-
 pcs/test/tools/command_env/config_env.py           |   14 +
 .../tools/command_env/config_runner_corosync.py    |   23 +-
 pcs/test/tools/command_env/config_runner_pcmk.py   |   41 +
 pcs/test/tools/command_env/mock_push_cib.py        |   14 +-
 .../tools/command_env/mock_push_corosync_conf.py   |   50 +
 pcs/test/tools/command_env/mock_runner.py          |    3 +
 pcs/usage.py                                       |   63 +-
 pcs/utils.py                                       |   13 +-
 pcsd/bootstrap.rb                                  |   32 +-
 pcsd/capabilities.xml                              |   49 +-
 pcsd/cluster_entity.rb                             |    4 +-
 pcsd/pcsd-cli.rb                                   |   21 +-
 pcsd/pcsd.8                                        |    3 +-
 pcsd/pcsd.rb                                       |   30 +-
 pcsd/public/js/nodes-ember.js                      |    3 +
 pcsd/public/js/pcsd.js                             |   80 +-
 pcsd/remote.rb                                     |   49 +-
 pcsd/views/main.erb                                |   22 +-
 pylintrc                                           |    2 +-
 setup.py                                           |    5 +-
 95 files changed, 5187 insertions(+), 1518 deletions(-)

diff --git a/CHANGELOG.md b/CHANGELOG.md
index 7990e68..8c9db4f 100644
--- a/CHANGELOG.md
+++ b/CHANGELOG.md
@@ -1,5 +1,35 @@
 # Change Log
 
+## [0.9.162] - 2017-11-15
+
+### Added
+- `pcs status --full` now displays information about tickets ([rhbz#1389943])
+- Support for managing qdevice heuristics ([rhbz#1389209])
+- SNMP agent providing information about the cluster to the master agent. It
+  currently supports only Python 2.7 ([rhbz#1367808]).
+
+### Fixed
+- Fixed crash when loading a huge xml ([rhbz#1506864])
+- Fixed adding an existing cluster into the GUI ([rhbz#1415197])
+- False warnings about failed actions when a resource is mastered or unmastered
+  from the web UI ([rhbz#1506220])
+
+### Changed
+- `pcs resource|stonith cleanup` no longer deletes the whole operation history
+  of resources. Instead, it only deletes failed operations from the history. The
+  original functionality is available in the `pcs resource|stonith refresh`
+  command. ([rhbz#1508351], [rhbz#1508350])
+
+[rhbz#1367808]: https://bugzilla.redhat.com/show_bug.cgi?id=1367808
+[rhbz#1389209]: https://bugzilla.redhat.com/show_bug.cgi?id=1389209
+[rhbz#1389943]: https://bugzilla.redhat.com/show_bug.cgi?id=1389943
+[rhbz#1415197]: https://bugzilla.redhat.com/show_bug.cgi?id=1415197
+[rhbz#1506220]: https://bugzilla.redhat.com/show_bug.cgi?id=1506220
+[rhbz#1506864]: https://bugzilla.redhat.com/show_bug.cgi?id=1506864
+[rhbz#1508350]: https://bugzilla.redhat.com/show_bug.cgi?id=1508350
+[rhbz#1508351]: https://bugzilla.redhat.com/show_bug.cgi?id=1508351
+
+
 ## [0.9.161] - 2017-11-02
 
 ### Added
@@ -9,7 +39,7 @@
 - Fixed `pcs cluster auth` when already authenticated and using different port
   ([rhbz#1415197])
 - It is now possible to restart a bundle resource on one node ([rhbz#1501274])
-- `resurce update` no longer exits with an error when the `remote-node` meta
+- `resource update` no longer exits with an error when the `remote-node` meta
   attribute is set to the same value that it already has
   ([rhbz#1502715], [ghissue#145])
 - Listing and describing resource and stonith agents no longer crashes when
diff --git a/Makefile b/Makefile
index 6423be3..04cd62a 100644
--- a/Makefile
+++ b/Makefile
@@ -83,6 +83,23 @@ ifndef PCSD_PARENT_DIR
   endif
 endif
 
+ifndef PCS_PARENT_DIR
+  PCS_PARENT_DIR=${DESTDIR}/${PREFIX}/lib/pcs
+endif
+
+BUNDLED_LIB_INSTALL_DIR=${PCS_PARENT_DIR}/bundled
+
+ifndef BUNDLED_LIB_DIR
+  BUNDLED_LIB_DIR=./pcs/bundled/
+endif
+BUNDLED_LIB_DIR_ABS=$(shell readlink -f ${BUNDLED_LIB_DIR})
+BUNDLES_TMP_DIR=${BUNDLED_LIB_DIR_ABS}/tmp
+
+ifndef SNMP_MIB_DIR
+  SNMP_MIB_DIR=/share/snmp/mibs/
+endif
+SNMP_MIB_DIR_FULL=${DESTDIR}/${PREFIX}/${SNMP_MIB_DIR}
+
 pcsd_fonts = \
 	LiberationSans-Regular.ttf;LiberationSans:style=Regular \
 	LiberationSans-Bold.ttf;LiberationSans:style=Bold \
@@ -92,7 +109,7 @@ pcsd_fonts = \
 	Overpass-Bold.ttf;Overpass:style=Bold
 
 
-install:
+install: install_bundled_libs
 	# make Python interpreter execution sane (via -Es flags)
 	printf "[build]\nexecutable = $(PYTHON) -Es\n" > setup.cfg
 	$(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS}
@@ -100,11 +117,24 @@ install:
 	# https://github.com/pypa/setuptools/issues/188
 	# https://bugzilla.redhat.com/1353934
 	sed -i '1s|^\(#!\)"\(.*\)"$$|\1\2|' ${DESTDIR}${PREFIX}/bin/pcs
+	sed -i '1s|^\(#!\)"\(.*\)"$$|\1\2|' ${DESTDIR}${PREFIX}/bin/pcs_snmp_agent
 	rm setup.cfg
 	mkdir -p ${DESTDIR}${PREFIX}/sbin/
 	mv ${DESTDIR}${PREFIX}/bin/pcs ${DESTDIR}${PREFIX}/sbin/pcs
 	install -D -m644 pcs/bash_completion ${BASH_COMPLETION_DIR}/pcs
 	install -m644 -D pcs/pcs.8 ${DESTDIR}/${MANDIR}/man8/pcs.8
+	# pcs SNMP install
+	mv ${DESTDIR}${PREFIX}/bin/pcs_snmp_agent ${PCS_PARENT_DIR}/pcs_snmp_agent
+	install -d ${DESTDIR}/var/log/pcs
+	install -d ${SNMP_MIB_DIR_FULL}
+	install -m 644 pcs/snmp/mibs/PCMK-PCS*-MIB.txt ${SNMP_MIB_DIR_FULL}
+	install -m 644 -D pcs/snmp/pcs_snmp_agent.conf ${DESTDIR}/etc/sysconfig/pcs_snmp_agent
+	install -m 644 -D pcs/snmp/pcs_snmp_agent.logrotate ${DESTDIR}/etc/logrotate.d/pcs_snmp_agent
+	install -m 644 -D pcs/snmp/pcs_snmp_agent.8 ${DESTDIR}/${MANDIR}/man8/pcs_snmp_agent.8
+ifeq ($(IS_SYSTEMCTL),true)
+	install -d ${DESTDIR}/${systemddir}/system/
+	install -m 644 pcs/snmp/pcs_snmp_agent.service ${DESTDIR}/${systemddir}/system/
+endif
 ifeq ($(IS_DEBIAN),true)
   ifeq ($(install_settings),true)
 	rm -f  ${DESTDIR}${PYTHON_SITELIB}/pcs/settings.py
@@ -166,21 +196,39 @@ endif
 		$(if $(font_path),ln -s -f $(font_path) ${DESTDIR}${PCSD_PARENT_DIR}/pcsd/public/css/$(font_file);,$(error Font $(font_def) not found)) \
 	)
 
+build_bundled_libs:
+ifndef PYAGENTX_INSTALLED
+	rm -rf ${BUNDLES_TMP_DIR}
+	mkdir -p ${BUNDLES_TMP_DIR}
+	$(MAKE) -C pcs/snmp/ build_bundled_libs
+	rm -rf ${BUNDLES_TMP_DIR}
+endif
+
+install_bundled_libs: build_bundled_libs
+ifndef PYAGENTX_INSTALLED
+	install -d ${BUNDLED_LIB_INSTALL_DIR}
+	cp -r ${BUNDLED_LIB_DIR_ABS}/packages ${BUNDLED_LIB_INSTALL_DIR}
+endif
+
 uninstall:
 	rm -f ${DESTDIR}${PREFIX}/sbin/pcs
 	rm -rf ${DESTDIR}${PYTHON_SITELIB}/pcs
 ifeq ($(IS_DEBIAN),true)
 	rm -rf ${DESTDIR}/usr/share/pcsd
+	rm -rf ${DESTDIR}/usr/share/pcs
 else
 	rm -rf ${DESTDIR}${PREFIX}/lib/pcsd
+	rm -rf ${DESTDIR}${PREFIX}/lib/pcs
 endif
 ifeq ($(IS_SYSTEMCTL),true)
 	rm -f ${DESTDIR}/${systemddir}/system/pcsd.service
+	rm -f ${DESTDIR}/${systemddir}/system/pcs_snmp_agent.service
 else
 	rm -f ${DESTDIR}/${initdir}/pcsd
 endif
 	rm -f ${DESTDIR}/etc/pam.d/pcsd
 	rm -rf ${DESTDIR}/var/lib/pcsd
+	rm -f ${SNMP_MIB_DIR_FULL}/PCMK-PCS*-MIB.txt
 
 tarball:
 	$(PYTHON) setup.py sdist --formats=tar
diff --git a/README.md b/README.md
index d46aead..b723e0d 100644
--- a/README.md
+++ b/README.md
@@ -47,6 +47,7 @@ installation:
 * fontconfig
 * printf (package coreutils)
 * redhat-rpm-config if you are using Fedora
+* wget (to download bundled libraries)
 
 During the installation, all required rubygems are automatically downloaded and
 compiled.
diff --git a/newversion.py b/newversion.py
index 1d453f7..f857934 100644
--- a/newversion.py
+++ b/newversion.py
@@ -28,14 +28,20 @@ print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version+"/' pcs/settin
 print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version+"/' pcsd/bootstrap.rb"))
 print(os.system("sed -i 's/\#\# \[Unreleased\]/\#\# ["+new_version+"] - "+datetime.date.today().strftime('%Y-%m-%d')+"/' CHANGELOG.md"))
 
-def manpage_head(component):
-    return '.TH {component} "8" "{date}" "pcs {version}" "System Administration Utilities"'.format(
+def manpage_head(component, package="pcs"):
+    return '.TH {component} "8" "{date}" "{package} {version}" "System Administration Utilities"'.format(
         component=component.upper(),
         date=datetime.date.today().strftime('%B %Y'),
-        version=new_version
+        version=new_version,
+        package=package,
     )
 print(os.system("sed -i '1c " + manpage_head("pcs") + "' pcs/pcs.8"))
 print(os.system("sed -i '1c " + manpage_head("pcsd") + "' pcsd/pcsd.8"))
+print(os.system(
+    "sed -i '1c {man_head}' pcs/snmp/pcs_snmp_agent.8".format(
+        man_head=manpage_head("pcs_snmp_agent", package="pcs-snmp"),
+    )
+))
 
 print(os.system("git diff"))
 print("Look good? (y/n)")
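
As a quick standalone illustration of the manpage_head() change above: the new
package argument lets the same helper emit the .TH header for the pcs-snmp man
page. The version string below is only an example; the real script substitutes
its new_version variable.

    import datetime

    def manpage_head(component, version, package="pcs"):
        # build the .TH line that newversion.py writes to line 1 of a man page
        return (
            '.TH {component} "8" "{date}" "{package} {version}"'
            ' "System Administration Utilities"'
        ).format(
            component=component.upper(),
            date=datetime.date.today().strftime('%B %Y'),
            version=version,
            package=package,
        )

    # e.g. .TH PCS_SNMP_AGENT "8" "November 2017" "pcs-snmp 0.9.162" ...
    print(manpage_head("pcs_snmp_agent", "0.9.162", package="pcs-snmp"))
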
diff --git a/pcs/app.py b/pcs/app.py
index 6a8e650..f60a81c 100644
--- a/pcs/app.py
+++ b/pcs/app.py
@@ -151,7 +151,7 @@ def main(argv=None):
 
     # create a dummy logger
     # we do not have a log file for cli (yet), but library requires a logger
-    logger = logging.getLogger("old_cli")
+    logger = logging.getLogger("pcs")
     logger.propagate = 0
     logger.handlers = []
 
diff --git a/pcs/cli/common/capabilities.py b/pcs/cli/common/capabilities.py
index 90d7ff1..2c90d16 100644
--- a/pcs/cli/common/capabilities.py
+++ b/pcs/cli/common/capabilities.py
@@ -8,9 +8,9 @@ from lxml import etree
 import os.path
 from textwrap import dedent
 
+from pcs import settings
 from pcs.cli.common.console_report import error
 from pcs.common.tools import xml_fromstring
-from pcs.utils import get_pcsd_dir
 
 
 def get_capabilities_definition():
@@ -19,7 +19,7 @@ def get_capabilities_definition():
 
     The point is to return all data in python structures for further processing.
     """
-    filename = os.path.join(get_pcsd_dir(), "capabilities.xml")
+    filename = os.path.join(settings.pcsd_exec_location, "capabilities.xml")
     try:
         with open(filename, mode="r") as file_:
             capabilities_xml = xml_fromstring(file_.read())
diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
index b3a4a3d..406532d 100644
--- a/pcs/cli/common/console_report.py
+++ b/pcs/cli/common/console_report.py
@@ -131,6 +131,32 @@ def resource_running_on_nodes(info):
         ]))
     )
 
+def invalid_option(info):
+    template = "invalid {desc}option{plural_options} {option_names_list},"
+    if not info["allowed"] and not info["allowed_patterns"]:
+        template += " there are no options allowed"
+    elif not info["allowed_patterns"]:
+        template += " allowed option{plural_allowed} {allowed_values}"
+    elif not info["allowed"]:
+        template += (
+            " allowed are options matching patterns: {allowed_patterns_values}"
+        )
+    else:
+        template += (
+            " allowed option{plural_allowed} {allowed_values}"
+            " and"
+            " options matching patterns: {allowed_patterns_values}"
+        )
+    return template.format(
+        desc=format_optional(info["option_type"], "{0} "),
+        allowed_values=", ".join(sorted(info["allowed"])),
+        allowed_patterns_values=", ".join(sorted(info["allowed_patterns"])),
+        option_names_list=joined_list(info["option_names"]),
+        plural_options=("s:" if len(info["option_names"]) > 1 else ""),
+        plural_allowed=("s are:" if len(info["allowed"]) > 1 else " is"),
+        **info
+    )
+
 def build_node_description(node_types):
     if not node_types:
         return  "Node"
@@ -201,23 +227,7 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
         )
     ,
 
-    codes.INVALID_OPTION: lambda info:
-        (
-            "invalid {desc}option{s} {option_names_list},"
-            +
-            (
-                " allowed option{are} {allowed_values}" if info["allowed"]
-                else " there are no options allowed"
-            )
-        ).format(
-            desc=format_optional(info["option_type"], "{0} "),
-            allowed_values=", ".join(sorted(info["allowed"])),
-            option_names_list=joined_list(info["option_names"]),
-            s=("s:" if len(info["option_names"]) > 1 else ""),
-            are=("s are:" if len(info["allowed"]) > 1 else " is"),
-            **info
-        )
-    ,
+    codes.INVALID_OPTION: invalid_option,
 
     codes.INVALID_OPTION_VALUE: lambda info:
         #value on key "allowed_values" is overloaded:
@@ -253,6 +263,18 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
         )
     ,
 
+    codes.INVALID_USERDEFINED_OPTIONS: lambda info:
+        (
+            "invalid {desc}option{plural_options} {option_names_list}, "
+            "{allowed_description}"
+        ).format(
+            desc=format_optional(info["option_type"], "{0} "),
+            option_names_list=joined_list(info["option_names"]),
+            plural_options=("s:" if len(info["option_names"]) > 1 else ""),
+            **info
+        )
+    ,
+
     codes.DEPRECATED_OPTION: lambda info:
         (
             "{desc}option '{option_name}' is deprecated and should not be "
@@ -495,6 +517,11 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
         .format(**info)
     ,
 
+    codes.COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC:
+        "No exec_NAME options are specified, so heuristics are effectively "
+            "disabled"
+    ,
+
     codes.COROSYNC_CONFIG_RELOADED: "Corosync configuration reloaded",
 
     codes.COROSYNC_CONFIG_RELOAD_ERROR: lambda info:
@@ -688,8 +715,9 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
         .format(**info)
     ,
 
-    codes.CIB_LOAD_ERROR_BAD_FORMAT:
-       "unable to get cib, xml does not conform to the schema"
+    codes.CIB_LOAD_ERROR_BAD_FORMAT: lambda info:
+       "unable to get cib, {reason}"
+       .format(**info)
     ,
 
     codes.CIB_CANNOT_FIND_MANDATORY_SECTION: lambda info:
@@ -747,15 +775,26 @@ CODE_TO_MESSAGE_BUILDER_MAP = {
 
     codes.RESOURCE_CLEANUP_ERROR: lambda info:
         (
-             "Unable to cleanup resource: {resource}\n{reason}"
+             (
+                "Unable to forget failed operations of resource: {resource}"
+                "\n{reason}"
+             )
+             if info["resource"] else
+             "Unable to forget failed operations of resources\n{reason}"
+        ).format(**info)
+    ,
+
+    codes.RESOURCE_REFRESH_ERROR: lambda info:
+        (
+             "Unable to delete history of resource: {resource}\n{reason}"
              if info["resource"] else
-             "Unexpected error occured. 'crm_resource -C' error:\n{reason}"
+             "Unable to delete history of resources\n{reason}"
         ).format(**info)
     ,
 
-    codes.RESOURCE_CLEANUP_TOO_TIME_CONSUMING: lambda info:
+    codes.RESOURCE_REFRESH_TOO_TIME_CONSUMING: lambda info:
         (
-             "Cleaning up all resources on all nodes will execute more "
+             "Deleting history of all resources on all nodes will execute more "
              "than {threshold} operations in the cluster, which may "
              "negatively impact the responsiveness of the cluster. "
              "Consider specifying resource and/or node"
diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
index 85a7c0c..134c5ad 100644
--- a/pcs/cli/common/lib_wrapper.py
+++ b/pcs/cli/common/lib_wrapper.py
@@ -46,7 +46,7 @@ def wrapper(dictionary):
 
 def cli_env_to_lib_env(cli_env):
     return LibraryEnvironment(
-        logging.getLogger("old_cli"),
+        logging.getLogger("pcs"),
         LibraryReportProcessorToConsole(cli_env.debug),
         cli_env.user,
         cli_env.groups,
@@ -304,6 +304,7 @@ def load_module(env, middleware_factory, name):
                 "add_device": quorum.add_device,
                 "get_config": quorum.get_config,
                 "remove_device": quorum.remove_device,
+                "remove_device_heuristics": quorum.remove_device_heuristics,
                 "set_expected_votes_live": quorum.set_expected_votes_live,
                 "set_options": quorum.set_options,
                 "status": quorum.status_text,
diff --git a/pcs/cli/common/test/test_capabilities.py b/pcs/cli/common/test/test_capabilities.py
index af23485..b921753 100644
--- a/pcs/cli/common/test/test_capabilities.py
+++ b/pcs/cli/common/test/test_capabilities.py
@@ -10,7 +10,7 @@ from pcs.test.tools.pcs_unittest import mock, TestCase
 from pcs.cli.common import capabilities
 
 
-@mock.patch("pcs.cli.common.capabilities.get_pcsd_dir", lambda: rc(""))
+@mock.patch("pcs.settings.pcsd_exec_location", rc(""))
 class Capabilities(TestCase):
     def test_get_definition(self):
         self.assertEqual(
diff --git a/pcs/cli/common/test/test_console_report.py b/pcs/cli/common/test/test_console_report.py
index ea862f8..2798912 100644
--- a/pcs/cli/common/test/test_console_report.py
+++ b/pcs/cli/common/test/test_console_report.py
@@ -54,6 +54,7 @@ class BuildInvalidOptionMessageTest(NameBuildTest):
                 "option_names": ["NAME"],
                 "option_type": "TYPE",
                 "allowed": ["SECOND", "FIRST"],
+                "allowed_patterns": [],
             }
         )
 
@@ -64,6 +65,7 @@ class BuildInvalidOptionMessageTest(NameBuildTest):
                 "option_names": ["NAME"],
                 "option_type": "",
                 "allowed": ["FIRST", "SECOND"],
+                "allowed_patterns": [],
             }
         )
 
@@ -74,6 +76,35 @@ class BuildInvalidOptionMessageTest(NameBuildTest):
                 "option_names": ["NAME", "ANOTHER"],
                 "option_type": "",
                 "allowed": ["FIRST"],
+                "allowed_patterns": [],
+            }
+        )
+
+    def test_pattern(self):
+        self.assert_message_from_info(
+            (
+                "invalid option 'NAME', allowed are options matching patterns: "
+                "exec_<name>"
+            ),
+            {
+                "option_names": ["NAME"],
+                "option_type": "",
+                "allowed": [],
+                "allowed_patterns": ["exec_<name>"],
+            }
+        )
+
+    def test_allowed_and_patterns(self):
+        self.assert_message_from_info(
+            (
+                "invalid option 'NAME', allowed option is FIRST and options "
+                "matching patterns: exec_<name>"
+            ),
+            {
+                "option_names": ["NAME"],
+                "option_type": "",
+                "allowed": ["FIRST"],
+                "allowed_patterns": ["exec_<name>"],
             }
         )
 
@@ -84,6 +115,51 @@ class BuildInvalidOptionMessageTest(NameBuildTest):
                 "option_names": ["NAME", "ANOTHER"],
                 "option_type": "",
                 "allowed": [],
+                "allowed_patterns": [],
+            }
+        )
+
+
+class InvalidUserdefinedOptions(NameBuildTest):
+    code = codes.INVALID_USERDEFINED_OPTIONS
+
+    def test_without_type(self):
+        self.assert_message_from_info(
+            (
+                "invalid option 'exec_NAME', "
+                "exec_NAME cannot contain . and whitespace characters"
+            ),
+            {
+                "option_names": ["exec_NAME"],
+                "option_type": "",
+                "allowed_description":
+                    "exec_NAME cannot contain . and whitespace characters"
+                ,
+            }
+        )
+
+    def test_with_type(self):
+        self.assert_message_from_info(
+            (
+                "invalid heuristics option 'exec_NAME', "
+                "exec_NAME cannot contain . and whitespace characters"
+            ),
+            {
+                "option_names": ["exec_NAME"],
+                "option_type": "heuristics",
+                "allowed_description":
+                    "exec_NAME cannot contain . and whitespace characters"
+                ,
+            }
+        )
+
+    def test_more_options(self):
+        self.assert_message_from_info(
+            "invalid TYPE options: 'ANOTHER', 'NAME', DESC",
+            {
+                "option_names": ["NAME", "ANOTHER"],
+                "option_type": "TYPE",
+                "allowed_description": "DESC",
             }
         )
 
@@ -1808,3 +1884,130 @@ class DefaultsCanBeOverriden(NameBuildTest):
             "Defaults do not apply to resources which override them with their "
             "own defined values"
         )
+
+
+class CibLoadErrorBadFormat(NameBuildTest):
+    code = codes.CIB_LOAD_ERROR_BAD_FORMAT
+    def test_message(self):
+        self.assert_message_from_info(
+            "unable to get cib, something wrong",
+            {
+                "reason": "something wrong"
+            }
+        )
+
+
+class CorosyncQuorumHeuristicsEnabledWithNoExec(NameBuildTest):
+    code = codes.COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC
+    def test_message(self):
+        self.assert_message_from_info(
+            "No exec_NAME options are specified, so heuristics are effectively "
+                "disabled"
+        )
+
+
+class ResourceCleanupError(NameBuildTest):
+    code = codes.RESOURCE_CLEANUP_ERROR
+
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "Unable to forget failed operations of resources\nsomething wrong",
+            {
+                "reason": "something wrong",
+                "resource": None,
+                "node": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "Unable to forget failed operations of resources\nsomething wrong",
+            {
+                "reason": "something wrong",
+                "resource": None,
+                "node": "N1",
+            }
+        )
+
+    def test_resource(self):
+        self.assert_message_from_info(
+            "Unable to forget failed operations of resource: R1\n"
+                "something wrong"
+            ,
+            {
+                "reason": "something wrong",
+                "resource": "R1",
+                "node": None,
+            }
+        )
+
+    def test_resource_and_node(self):
+        self.assert_message_from_info(
+            "Unable to forget failed operations of resource: R1\n"
+                "something wrong"
+            ,
+            {
+                "reason": "something wrong",
+                "resource": "R1",
+                "node": "N1",
+            }
+        )
+
+
+class ResourceRefreshError(NameBuildTest):
+    code = codes.RESOURCE_REFRESH_ERROR
+
+    def test_minimal(self):
+        self.assert_message_from_info(
+            "Unable to delete history of resources\nsomething wrong",
+            {
+                "reason": "something wrong",
+                "resource": None,
+                "node": None,
+            }
+        )
+
+    def test_node(self):
+        self.assert_message_from_info(
+            "Unable to delete history of resources\nsomething wrong",
+            {
+                "reason": "something wrong",
+                "resource": None,
+                "node": "N1",
+            }
+        )
+
+    def test_resource(self):
+        self.assert_message_from_info(
+            "Unable to delete history of resource: R1\nsomething wrong",
+            {
+                "reason": "something wrong",
+                "resource": "R1",
+                "node": None,
+            }
+        )
+
+    def test_resource_and_node(self):
+        self.assert_message_from_info(
+            "Unable to delete history of resource: R1\nsomething wrong",
+            {
+                "reason": "something wrong",
+                "resource": "R1",
+                "node": "N1",
+            }
+        )
+
+
+class ResourceRefreshTooTimeConsuming(NameBuildTest):
+    code = codes.RESOURCE_REFRESH_TOO_TIME_CONSUMING
+    def test_success(self):
+        self.assert_message_from_info(
+            "Deleting history of all resources on all nodes will execute more "
+                "than 25 operations in the cluster, which may negatively "
+                "impact the responsiveness of the cluster. Consider specifying "
+                "resource and/or node"
+            ,
+            {
+                "threshold": 25,
+            }
+        )
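
The tests above pin down the wording of the reworked INVALID_OPTION message;
since invalid_option() is a plain module-level function, it can also be checked
interactively. A minimal sketch, assuming the pcs package from this tree is on
the Python path; the expected message matches the test_allowed_and_patterns
case above:

    from pcs.cli.common.console_report import invalid_option

    info = {
        "option_names": ["NAME"],
        "option_type": "",
        "allowed": ["FIRST"],
        "allowed_patterns": ["exec_<name>"],
    }
    # -> invalid option 'NAME', allowed option is FIRST and options matching
    #    patterns: exec_<name>
    print(invalid_option(info))
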
diff --git a/pcs/cluster.py b/pcs/cluster.py
index a2971e6..a330164 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -1752,7 +1752,7 @@ def node_add(lib_env, node0, node1, modifiers):
             conf_facade = corosync_conf_facade.from_string(
                 utils.getCorosyncConf()
             )
-            qdevice_model, qdevice_model_options, _ = conf_facade.get_quorum_device_settings()
+            qdevice_model, qdevice_model_options, _, _ = conf_facade.get_quorum_device_settings()
             if qdevice_model == "net":
                 _add_device_model_net(
                     lib_env,
diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
index 6077ce1..6ab619f 100644
--- a/pcs/common/report_codes.py
+++ b/pcs/common/report_codes.py
@@ -97,6 +97,7 @@ COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE"
 COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE = "COROSYNC_OPTIONS_INCOMPATIBLE_WITH_QDEVICE"
 COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD = "COROSYNC_QUORUM_CANNOT_DISABLE_ATB_DUE_TO_SBD"
 COROSYNC_QUORUM_GET_STATUS_ERROR = "COROSYNC_QUORUM_GET_STATUS_ERROR"
+COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC = "COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC"
 COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR = "COROSYNC_QUORUM_SET_EXPECTED_VOTES_ERROR"
 COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE"
 CRM_MON_ERROR = "CRM_MON_ERROR"
@@ -121,6 +122,7 @@ IGNORED_CMAN_UNSUPPORTED_OPTION = 'IGNORED_CMAN_UNSUPPORTED_OPTION'
 INVALID_CIB_CONTENT = "INVALID_CIB_CONTENT"
 INVALID_ID = "INVALID_ID"
 INVALID_OPTION = "INVALID_OPTION"
+INVALID_USERDEFINED_OPTIONS = "INVALID_USERDEFINED_OPTIONS"
 INVALID_OPTION_TYPE = "INVALID_OPTION_TYPE"
 INVALID_OPTION_VALUE = "INVALID_OPTION_VALUE"
 INVALID_RESOURCE_NAME = 'INVALID_RESOURCE_NAME'
@@ -186,7 +188,6 @@ REQUIRED_OPTION_OF_ALTERNATIVES_IS_MISSING = "REQUIRED_OPTION_OF_ALTERNATIVES_IS
 RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE = "RESOURCE_BUNDLE_ALREADY_CONTAINS_A_RESOURCE"
 RESOURCE_CANNOT_BE_NEXT_TO_ITSELF_IN_GROUP = "RESOURCE_CANNOT_BE_NEXT_TO_ITSELF_IN_GROUP"
 RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR"
-RESOURCE_CLEANUP_TOO_TIME_CONSUMING = 'RESOURCE_CLEANUP_TOO_TIME_CONSUMING'
 RESOURCE_DOES_NOT_RUN = "RESOURCE_DOES_NOT_RUN"
 RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE = 'RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE'
 RESOURCE_IS_GUEST_NODE_ALREADY = "RESOURCE_IS_GUEST_NODE_ALREADY"
@@ -194,6 +195,8 @@ RESOURCE_IS_UNMANAGED = "RESOURCE_IS_UNMANAGED"
 RESOURCE_MANAGED_NO_MONITOR_ENABLED = "RESOURCE_MANAGED_NO_MONITOR_ENABLED"
 RESOURCE_OPERATION_INTERVAL_DUPLICATION = "RESOURCE_OPERATION_INTERVAL_DUPLICATION"
 RESOURCE_OPERATION_INTERVAL_ADAPTED = "RESOURCE_OPERATION_INTERVAL_ADAPTED"
+RESOURCE_REFRESH_ERROR = "RESOURCE_REFRESH_ERROR"
+RESOURCE_REFRESH_TOO_TIME_CONSUMING = 'RESOURCE_REFRESH_TOO_TIME_CONSUMING'
 RESOURCE_RUNNING_ON_NODES = "RESOURCE_RUNNING_ON_NODES"
 RRP_ACTIVE_NOT_SUPPORTED = 'RRP_ACTIVE_NOT_SUPPORTED'
 RUN_EXTERNAL_PROCESS_ERROR = "RUN_EXTERNAL_PROCESS_ERROR"
diff --git a/pcs/common/test/test_tools_xml_fromstring.py b/pcs/common/test/test_tools_xml_fromstring.py
new file mode 100644
index 0000000..8a459f8
--- /dev/null
+++ b/pcs/common/test/test_tools_xml_fromstring.py
@@ -0,0 +1,97 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+)
+
+from pcs.test.tools.pcs_unittest import TestCase
+from pcs.common.tools import xml_fromstring
+
+class XmlFromstring(TestCase):
+    def test_large_xml(self):
+        #it raises on a huge xml without the flag huge_tree=True
+        #see https://bugzilla.redhat.com/show_bug.cgi?id=1506864
+        xml_fromstring(large_xml)
+
+large_xml = """
+<cib admin_epoch="1" epoch="1305" num_updates="0" validate-with="pacemaker-2.8">
+      <configuration>
+        <crm_config/>
+        <nodes/>
+        <resources>{0}</resources>
+        <constraints/>
+      </configuration>
+  <status>{1}</status>
+</cib>
+""".format(
+    "".join([
+        """
+        <bundle id="scale{0}-bundle">
+            <meta_attributes id="scale{0}-bundle-meta_attributes">
+                <nvpair id="scale{0}-bundle-meta_attributes-target-role"
+                    name="target-role" value="Stopped"
+                />
+            </meta_attributes>
+            <docker run-command="/usr/sbin/pacemaker_remoted"
+                image="user:remote"
+                options="--user=root --log-driver=journald" replicas="20"
+            />
+            <network control-port="{0}"/>
+            <storage>
+                <storage-mapping target-dir="/dev/log"
+                    id="dev-log-{0}" source-dir="/dev/log"
+                />
+            </storage>
+        </bundle>
+        """.format(i) for i in range(20000)
+    ]),
+
+    "".join([
+        """
+        <node_state id="{1}" uname="c09-h05-r630" in_ccm="true" crmd="online"
+            crm-debug-origin="do_update_resource" join="member"
+            expected="member"
+        >
+            <transient_attributes id="{1}">
+                <instance_attributes id="status-{1}"/>
+            </transient_attributes>
+            <lrm id="1">
+                <lrm_resources>{0}</lrm_resources>
+            </lrm>
+        </node_state>
+        """.format("".join([
+            """
+            <lrm_resource id="scale13-bundle-{0}" type="remote" class="ocf"
+                provider="pacemaker" container="scale13-bundle-docker-{0}"
+            >
+                <lrm_rsc_op id="scale13-bundle-{0}_last_0"
+                    operation_key="scale13-bundle-{0}_start_0"
+                    operation="start" crm-debug-origin="do_update_resource"
+                    crm_feature_set="3.0.14"
+                    transition-key="2957:15:0:2459ea96-7c1d-4276-9c21-828061199"
+                    transition-magic="0:0;2957:15:0:2459ea96-7c1d-4276-9c21-828"
+                    on_node="c09-h05-r630" call-id="2223" rc-code="0"
+                    op-status="0" interval="0" last-run="1509318692"
+                    last-rc-change="1509318692" exec-time="0" queue-time="0"
+                    op-digest="802dc0edf5e736d13a41ac47626295eb"
+                    op-force-restart=" reconnect_interval  port "
+                    op-restart-digest="e38862dec2edf868edfcb2d64d77ff55"
+                />
+                <lrm_rsc_op id="scale13-bundle-{0}_monitor_60000"
+                    operation_key="scale13-bundle-1{0}_monitor_60000"
+                    operation="monitor" crm-debug-origin="do_update_resource"
+                    crm_feature_set="3.0.14"
+                    transition-key="2994:16:0:2459ea96-7c1d-4276-9c21-828061199"
+                    transition-magic="0:0;2994:16:0:2459ea96-7c1d-4276-9c21-828"
+                    on_node="c09-h05-r630" call-id="2264" rc-code="0"
+                    op-status="0" interval="60000" last-rc-change="1509318915"
+                    exec-time="0" queue-time="0"
+                    op-digest="3b2ba04195253e454b50aa4a340af042"
+                />
+            </lrm_resource>
+            """.format("{0}-{1}".format(i, j))
+            for j in range(98)
+        ]), i)
+        for i in range(5)
+    ])
+)
diff --git a/pcs/common/tools.py b/pcs/common/tools.py
index ac7a0d4..382057e 100644
--- a/pcs/common/tools.py
+++ b/pcs/common/tools.py
@@ -72,4 +72,9 @@ def xml_fromstring(xml):
     # So we encode the string to bytes.
     # In python2 we cannot do that as it causes a UnicodeDecodeError if the xml
     # contains a non-ascii character.
-    return etree.fromstring(xml if _PYTHON2 else xml.encode("utf-8"))
+    return etree.fromstring(
+        xml if _PYTHON2 else xml.encode("utf-8"),
+        #it raises on a huge xml without the flag huge_tree=True
+        #see https://bugzilla.redhat.com/show_bug.cgi?id=1506864
+        etree.XMLParser(huge_tree=True)
+    )
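
A Python 3 only restatement of the patched helper, for readers who want to try
the flag in isolation; lxml must be installed, and the Python 2 branch from the
hunk above is dropped here:

    from lxml import etree

    def xml_fromstring(xml):
        # huge_tree=True lifts libxml2's built-in size/depth safety limits so
        # very large CIB documents parse instead of raising
        # (https://bugzilla.redhat.com/show_bug.cgi?id=1506864)
        return etree.fromstring(
            xml.encode("utf-8"),
            etree.XMLParser(huge_tree=True),
        )

    root = xml_fromstring("<cib><configuration/><status/></cib>")
    print(root.tag)  # cib
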
diff --git a/pcs/lib/booth/test/test_config_structure.py b/pcs/lib/booth/test/test_config_structure.py
index 3d89853..83de549 100644
--- a/pcs/lib/booth/test/test_config_structure.py
+++ b/pcs/lib/booth/test/test_config_structure.py
@@ -61,6 +61,7 @@ class ValidateTicketOptionsTest(TestCase):
                     "option_names": ["site"],
                     "option_type": "booth ticket",
                     "allowed": list(config_structure.TICKET_KEYS),
+                    "allowed_patterns": [],
                 },
             ),
             (
@@ -70,6 +71,7 @@ class ValidateTicketOptionsTest(TestCase):
                     "option_names": ["port"],
                     "option_type": "booth ticket",
                     "allowed": list(config_structure.TICKET_KEYS),
+                    "allowed_patterns": [],
                 },
             ),
             (
@@ -88,6 +90,7 @@ class ValidateTicketOptionsTest(TestCase):
                     "option_names": ["unknown"],
                     "option_type": "booth ticket",
                     "allowed": list(config_structure.TICKET_KEYS),
+                    "allowed_patterns": [],
                 },
                 report_codes.FORCE_OPTIONS
             ),
@@ -120,6 +123,7 @@ class ValidateTicketOptionsTest(TestCase):
                     "option_names": ["site"],
                     "option_type": "booth ticket",
                     "allowed": list(config_structure.TICKET_KEYS),
+                    "allowed_patterns": [],
                 },
             ),
         ]
@@ -143,6 +147,7 @@ class ValidateTicketOptionsTest(TestCase):
                         "option_names": ["unknown"],
                         "option_type": "booth ticket",
                         "allowed": list(config_structure.TICKET_KEYS),
+                        "allowed_patterns": [],
                     },
                 ),
             ]
diff --git a/pcs/lib/cib/test/test_constraint.py b/pcs/lib/cib/test/test_constraint.py
index 00c8ea8..9774414 100644
--- a/pcs/lib/cib/test/test_constraint.py
+++ b/pcs/lib/cib/test/test_constraint.py
@@ -222,6 +222,7 @@ class PrepareOptionsTest(TestCase):
                     "option_names": ["b"],
                     "option_type": None,
                     "allowed": ["a", "id"],
+                    "allowed_patterns": [],
                 }
             ),
         )
diff --git a/pcs/lib/cib/test/test_constraint_colocation.py b/pcs/lib/cib/test/test_constraint_colocation.py
index 56d74df..5c4713d 100644
--- a/pcs/lib/cib/test/test_constraint_colocation.py
+++ b/pcs/lib/cib/test/test_constraint_colocation.py
@@ -93,6 +93,7 @@ class PrepareOptionsWithSetTest(TestCase):
                         "score-attribute",
                         "score-attribute-mangle",
                     ],
+                    "allowed_patterns": [],
                 }
             ),
         )
diff --git a/pcs/lib/cib/test/test_constraint_order.py b/pcs/lib/cib/test/test_constraint_order.py
index e273016..57bbff5 100644
--- a/pcs/lib/cib/test/test_constraint_order.py
+++ b/pcs/lib/cib/test/test_constraint_order.py
@@ -97,6 +97,7 @@ class PrepareOptionsWithSetTest(TestCase):
                     "option_names": ["unknown"],
                     "option_type": None,
                     "allowed": [ "id", "kind", "symmetrical"],
+                    "allowed_patterns": [],
                 }
             ),
         )
diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py
index 6026e3e..6c76f43 100644
--- a/pcs/lib/cib/test/test_constraint_ticket.py
+++ b/pcs/lib/cib/test/test_constraint_ticket.py
@@ -74,6 +74,7 @@ class PrepareOptionsPlainTest(TestCase):
                     "option_names": ["unknown"],
                     "option_type": None,
                     "allowed": ["id", "loss-policy", "rsc", "rsc-role", "ticket"],
+                    "allowed_patterns": [],
                 }
             ),
         )
@@ -253,12 +254,12 @@ class AreDuplicatePlain(TestCase):
     def setUp(self):
         self.first = Element({
             "ticket": "ticket_key",
-            "rsc": "resurceA",
+            "rsc": "resourceA",
             "rsc-role": "Master"
         })
         self.second = Element({
             "ticket": "ticket_key",
-            "rsc": "resurceA",
+            "rsc": "resourceA",
             "rsc-role": "Master"
         })
 
diff --git a/pcs/lib/cib/test/test_resource_guest_node.py b/pcs/lib/cib/test/test_resource_guest_node.py
index 64932c3..0e33420 100644
--- a/pcs/lib/cib/test/test_resource_guest_node.py
+++ b/pcs/lib/cib/test/test_resource_guest_node.py
@@ -145,6 +145,7 @@ class ValidateOptions(TestCase):
                         "option_type": "guest",
                         "option_names": ["invalid"],
                         "allowed": sorted(guest_node.GUEST_OPTIONS),
+                        "allowed_patterns": [],
                     },
                     None
                 ),
diff --git a/pcs/lib/cib/test/test_resource_operations.py b/pcs/lib/cib/test/test_resource_operations.py
index 72f9e40..6cb9000 100644
--- a/pcs/lib/cib/test/test_resource_operations.py
+++ b/pcs/lib/cib/test/test_resource_operations.py
@@ -273,6 +273,7 @@ class ValidateOperation(TestCase):
                         "option_names": ["unknown"],
                         "option_type": "resource operation",
                         "allowed": sorted(operations.ATTRIBUTES),
+                        "allowed_patterns": [],
                     },
                     None
                 ),
diff --git a/pcs/lib/cib/test/test_resource_remote_node.py b/pcs/lib/cib/test/test_resource_remote_node.py
index 4d0942d..2fea22e 100644
--- a/pcs/lib/cib/test/test_resource_remote_node.py
+++ b/pcs/lib/cib/test/test_resource_remote_node.py
@@ -279,6 +279,7 @@ class Validate(TestCase):
                         'option_type': 'resource',
                         'option_names': ['server'],
                         'allowed': [],
+                        "allowed_patterns": [],
                     },
                     None
                 )
diff --git a/pcs/lib/cib/test/test_resource_set.py b/pcs/lib/cib/test/test_resource_set.py
index 9282b19..72ba337 100644
--- a/pcs/lib/cib/test/test_resource_set.py
+++ b/pcs/lib/cib/test/test_resource_set.py
@@ -19,7 +19,7 @@ from pcs.test.tools.pcs_unittest import mock
 
 
 class PrepareSetTest(TestCase):
-    def test_return_corrected_resurce_set(self):
+    def test_return_corrected_resource_set(self):
         find_valid_id = mock.Mock()
         find_valid_id.side_effect = lambda id: {"A": "AA", "B": "BB"}[id]
         self.assertEqual(
@@ -43,6 +43,7 @@ class PrepareSetTest(TestCase):
                     "option_names": ["invalid_name"],
                     "option_type": None,
                     "allowed": ["action", "require-all", "role", "sequential"],
+                    "allowed_patterns": [],
             }),
         )
 
diff --git a/pcs/lib/cib/test/test_tools.py b/pcs/lib/cib/test/test_tools.py
index e495cff..62d1d45 100644
--- a/pcs/lib/cib/test/test_tools.py
+++ b/pcs/lib/cib/test/test_tools.py
@@ -412,7 +412,10 @@ class GetPacemakerVersionByWhichCibWasValidatedTest(TestCase):
             (
                 severities.ERROR,
                 report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
-                {}
+                {
+                    "reason": "the attribute 'validate-with' of the element"
+                        " 'cib' is missing"
+                }
             )
         )
 
@@ -424,7 +427,10 @@ class GetPacemakerVersionByWhichCibWasValidatedTest(TestCase):
             (
                 severities.ERROR,
                 report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
-                {}
+                {
+                    "reason": "the attribute 'validate-with' of the element"
+                        " 'cib' has an invalid value: 'something-1.2.3'"
+                }
             )
         )
 
diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
index 3af23f6..60173c1 100644
--- a/pcs/lib/cib/tools.py
+++ b/pcs/lib/cib/tools.py
@@ -248,14 +248,19 @@ def get_pacemaker_version_by_which_cib_was_validated(cib):
     """
     version = cib.get("validate-with")
     if version is None:
-        raise LibraryError(reports.cib_load_error_invalid_format())
+        raise LibraryError(reports.cib_load_error_invalid_format(
+            "the attribute 'validate-with' of the element 'cib' is missing"
+        ))
 
     regexp = re.compile(
         r"pacemaker-(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<rev>\d+))?"
     )
     match = regexp.match(version)
     if not match:
-        raise LibraryError(reports.cib_load_error_invalid_format())
+        raise LibraryError(reports.cib_load_error_invalid_format(
+            "the attribute 'validate-with' of the element 'cib' has an invalid"
+            " value: '{0}'".format(version)
+        ))
     return (
         int(match.group("major")),
         int(match.group("minor")),
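
The hunk above only embeds a human-readable reason into the report; the parsing
itself is a small regex match on the validate-with attribute. A standalone
sketch of that parsing (the function name and the missing-revision default of 0
are illustrative, not copied verbatim from the file):

    import re

    _VALIDATE_WITH_RE = re.compile(
        r"pacemaker-(?P<major>\d+)\.(?P<minor>\d+)(\.(?P<rev>\d+))?"
    )

    def pacemaker_schema_version(validate_with):
        # "pacemaker-2.8" -> (2, 8, 0); an unparsable value returns None
        match = _VALIDATE_WITH_RE.match(validate_with)
        if not match:
            return None
        return (
            int(match.group("major")),
            int(match.group("minor")),
            int(match.group("rev") or 0),
        )

    print(pacemaker_schema_version("pacemaker-2.8"))     # (2, 8, 0)
    print(pacemaker_schema_version("something-1.2.3"))   # None
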
diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py
index 6d0ab38..3e9db0e 100644
--- a/pcs/lib/commands/quorum.py
+++ b/pcs/lib/commands/quorum.py
@@ -28,11 +28,14 @@ def get_config(lib_env):
     cfg = lib_env.get_corosync_conf()
     device = None
     if cfg.has_quorum_device():
-        model, model_options, generic_options = cfg.get_quorum_device_settings()
+        model, model_options, generic_options, heuristics_options = (
+            cfg.get_quorum_device_settings()
+        )
         device = {
             "model": model,
             "model_options": model_options,
             "generic_options": generic_options,
+            "heuristics_options": heuristics_options,
         }
     return {
         "options": cfg.get_quorum_options(),
@@ -101,17 +104,19 @@ def status_device_text(lib_env, verbose=False):
     return qdevice_client.get_status_text(lib_env.cmd_runner(), verbose)
 
 def add_device(
-    lib_env, model, model_options, generic_options, force_model=False,
-    force_options=False, skip_offline_nodes=False
+    lib_env, model, model_options, generic_options, heuristics_options,
+    force_model=False, force_options=False, skip_offline_nodes=False
 ):
     """
-    Add quorum device to cluster, distribute and reload configs if live
-    model quorum device model
-    model_options model specific options dict
-    generic_options generic quorum device options dict
-    force_model continue even if the model is not valid
-    force_options continue even if options are not valid
-    skip_offline_nodes continue even if not all nodes are accessible
+    Add a quorum device to a cluster, distribute and reload configs if live
+
+    string model -- quorum device model
+    dict model_options -- model specific options
+    dict generic_options -- generic quorum device options
+    dict heuristics_options -- heuristics options
+    bool force_model -- continue even if the model is not valid
+    bool force_options -- continue even if options are not valid
+    bool skip_offline_nodes -- continue even if not all nodes are accessible
     """
     __ensure_not_cman(lib_env)
 
@@ -123,8 +128,9 @@ def add_device(
         model,
         model_options,
         generic_options,
-        force_model,
-        force_options
+        heuristics_options,
+        force_model=force_model,
+        force_options=force_options
     )
     target_list = lib_env.get_node_target_factory().get_target_list(
         cfg.get_nodes()
@@ -222,15 +228,17 @@ def _add_device_model_net(
     run_and_raise(lib_env.get_node_communicator(), com_cmd)
 
 def update_device(
-    lib_env, model_options, generic_options, force_options=False,
-    skip_offline_nodes=False
+    lib_env, model_options, generic_options, heuristics_options,
+    force_options=False, skip_offline_nodes=False
 ):
     """
     Change quorum device settings, distribute and reload configs if live
-    model_options model specific options dict
-    generic_options generic quorum device options dict
-    force_options continue even if options are not valid
-    skip_offline_nodes continue even if not all nodes are accessible
+
+    dict model_options -- model specific options
+    dict generic_options -- generic quorum device options
+    dict heuristics_options -- heuristics options
+    bool force_options -- continue even if options are not valid
+    bool skip_offline_nodes -- continue even if not all nodes are accessible
     """
     __ensure_not_cman(lib_env)
     cfg = lib_env.get_corosync_conf()
@@ -238,10 +246,22 @@ def update_device(
         lib_env.report_processor,
         model_options,
         generic_options,
-        force_options
+        heuristics_options,
+        force_options=force_options
     )
     lib_env.push_corosync_conf(cfg, skip_offline_nodes)
 
+def remove_device_heuristics(lib_env, skip_offline_nodes=False):
+    """
+    Stop using quorum device heuristics, distribute and reload configs if live
+
+    bool skip_offline_nodes -- continue even if not all nodes are accessible
+    """
+    __ensure_not_cman(lib_env)
+    cfg = lib_env.get_corosync_conf()
+    cfg.remove_quorum_device_heuristics()
+    lib_env.push_corosync_conf(cfg, skip_offline_nodes)
+
 def remove_device(lib_env, skip_offline_nodes=False):
     """
     Stop using quorum device, distribute and reload configs if live
@@ -250,7 +270,9 @@ def remove_device(lib_env, skip_offline_nodes=False):
     __ensure_not_cman(lib_env)
 
     cfg = lib_env.get_corosync_conf()
-    model, dummy_options, dummy_options = cfg.get_quorum_device_settings()
+    model, dummy_options, dummy_options, dummy_options = (
+        cfg.get_quorum_device_settings()
+    )
     cfg.remove_quorum_device()
 
     if lib_env.is_corosync_conf_live:
@@ -333,4 +355,3 @@ def set_expected_votes_live(lib_env, expected_votes):
 def __ensure_not_cman(lib_env):
     if lib_env.is_corosync_conf_live and lib_env.is_cman_cluster:
         raise LibraryError(reports.cman_unsupported_command())
-
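
The quorum commands above now unpack a 4-tuple from get_quorum_device_settings();
the added fourth element carries the heuristics options. A usage sketch against
a throwaway corosync.conf fragment; the host name and exec_ command are made up,
and it assumes ConfigFacade.from_string accepts such a minimal config:

    from pcs.lib.corosync.config_facade import ConfigFacade

    conf = '''
    quorum {
        provider: corosync_votequorum
        device {
            model: net
            net {
                host: qnetd.example.com
            }
            heuristics {
                mode: on
                exec_ping: /usr/bin/ping -c 1 node1
            }
        }
    }
    '''

    cfg = ConfigFacade.from_string(conf)
    model, model_options, generic_options, heuristics = (
        cfg.get_quorum_device_settings()
    )
    print(model)       # net
    print(heuristics)  # {'mode': 'on', 'exec_ping': '/usr/bin/ping -c 1 node1'}
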
diff --git a/pcs/lib/commands/sbd.py b/pcs/lib/commands/sbd.py
index 8dc315c..8304942 100644
--- a/pcs/lib/commands/sbd.py
+++ b/pcs/lib/commands/sbd.py
@@ -68,8 +68,14 @@ def _validate_sbd_options(sbd_config, allow_unknown_opts=False):
                 [sbd_opt],
                 allowed_sbd_options,
                 None,
-                Severities.WARNING if allow_unknown_opts else Severities.ERROR,
-                None if allow_unknown_opts else report_codes.FORCE_OPTIONS
+                severity=(
+                    Severities.WARNING if allow_unknown_opts
+                    else Severities.ERROR
+                ),
+                forceable=(
+                    None if allow_unknown_opts
+                    else report_codes.FORCE_OPTIONS
+                )
             ))
     if "SBD_WATCHDOG_TIMEOUT" in sbd_config:
         report_item = reports.invalid_option_value(
diff --git a/pcs/lib/commands/test/resource/test_bundle_create.py b/pcs/lib/commands/test/resource/test_bundle_create.py
index e9f6c10..8f5ebe5 100644
--- a/pcs/lib/commands/test/resource/test_bundle_create.py
+++ b/pcs/lib/commands/test/resource/test_bundle_create.py
@@ -254,6 +254,7 @@ class CreateDocker(TestCase):
                         "option_names": ["extra", ],
                         "option_type": "container",
                         "allowed": self.allowed_options,
+                        "allowed_patterns": [],
                     },
                     report_codes.FORCE_OPTIONS
                 ),
@@ -286,6 +287,7 @@ class CreateDocker(TestCase):
                     "option_names": ["extra", ],
                     "option_type": "container",
                     "allowed": self.allowed_options,
+                    "allowed_patterns": [],
                 },
                 None
             ),
@@ -379,6 +381,7 @@ class CreateWithNetwork(TestCase):
                         "option_names": ["extra", ],
                         "option_type": "network",
                         "allowed": self.allowed_options,
+                        "allowed_patterns": [],
                     },
                     report_codes.FORCE_OPTIONS
                 ),
@@ -426,6 +429,7 @@ class CreateWithNetwork(TestCase):
                     "option_names": ["extra", ],
                     "option_type": "network",
                     "allowed": self.allowed_options,
+                    "allowed_patterns": [],
                 },
                 None
             ),
@@ -625,6 +629,7 @@ class CreateWithPortMap(TestCase):
                         "option_names": ["extra", ],
                         "option_type": "port-map",
                         "allowed": self.allowed_options,
+                        "allowed_patterns": [],
                     },
                     report_codes.FORCE_OPTIONS
                 ),
@@ -681,6 +686,7 @@ class CreateWithPortMap(TestCase):
                     "option_names": ["extra", ],
                     "option_type": "port-map",
                     "allowed": self.allowed_options,
+                    "allowed_patterns": [],
                 },
                 None
             ),
@@ -856,6 +862,7 @@ class CreateWithStorageMap(TestCase):
                         "option_names": ["extra", ],
                         "option_type": "storage-map",
                         "allowed": self.allowed_options,
+                        "allowed_patterns": [],
                     },
                     report_codes.FORCE_OPTIONS
                 ),
@@ -905,6 +912,7 @@ class CreateWithStorageMap(TestCase):
                         "option_names": ["extra", ],
                         "option_type": "storage-map",
                         "allowed": self.allowed_options,
+                        "allowed_patterns": [],
                     },
                     None
                 ),
diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py
index 2145eb0..b033976 100644
--- a/pcs/lib/corosync/config_facade.py
+++ b/pcs/lib/corosync/config_facade.py
@@ -4,8 +4,10 @@ from __future__ import (
     print_function,
 )
 
+import re
+
 from pcs.common import report_codes
-from pcs.lib import reports
+from pcs.lib import reports, validate
 from pcs.lib.errors import ReportItemSeverity, LibraryError
 from pcs.lib.corosync import config_parser
 from pcs.lib.node import NodeAddresses, NodeAddressesList
@@ -26,7 +28,7 @@ class ConfigFacade(object):
         "last_man_standing",
         "last_man_standing_window",
     )
-
+    __QUORUM_DEVICE_HEURISTICS_EXEC_NAME_RE = re.compile("^exec_[^.:{}#\s]+$")
 
     @classmethod
     def from_string(cls, config_string):
@@ -199,6 +201,7 @@ class ConfigFacade(object):
         model = None
         model_options = {}
         generic_options = {}
+        heuristics_options = {}
         for quorum in self.config.get_sections("quorum"):
             for device in quorum.get_sections("device"):
                 for name, value in device.get_attributes():
@@ -207,24 +210,34 @@ class ConfigFacade(object):
                     else:
                         generic_options[name] = value
                 for subsection in device.get_sections():
+                    if subsection.name == "heuristics":
+                        heuristics_options.update(subsection.get_attributes())
+                        continue
                     if subsection.name not in model_options:
                         model_options[subsection.name] = {}
                     model_options[subsection.name].update(
                         subsection.get_attributes()
                     )
-        return model, model_options.get(model, {}), generic_options
+        return (
+            model,
+            model_options.get(model, {}),
+            generic_options,
+            heuristics_options,
+        )
 
     def add_quorum_device(
         self, report_processor, model, model_options, generic_options,
-        force_model=False, force_options=False,
+        heuristics_options, force_model=False, force_options=False,
     ):
         """
         Add quorum device configuration
-        model quorum device model
-        model_options model specific options dict
-        generic_options generic quorum device options dict
-        force_model continue even if the model is not valid
-        force_options continue even if options are not valid
+
+        string model -- quorum device model
+        dict model_options -- model specific options
+        dict generic_options -- generic quorum device options
+        dict heuristics_options -- heuristics options
+        bool force_model -- continue even if the model is not valid
+        bool force_options -- continue even if options are not valid
         """
         # validation
         if self.has_quorum_device():
@@ -243,6 +256,11 @@ class ConfigFacade(object):
                 generic_options,
                 force=force_options
             )
+            +
+            self.__validate_quorum_device_add_heuristics(
+                heuristics_options,
+                force_options=force_options
+            )
         )
 
         # configuration cleanup
@@ -283,19 +301,30 @@ class ConfigFacade(object):
         new_model = config_parser.Section(model)
         self.__set_section_options([new_model], model_options)
         new_device.add_section(new_model)
+        new_heuristics = config_parser.Section("heuristics")
+        self.__set_section_options([new_heuristics], heuristics_options)
+        new_device.add_section(new_heuristics)
+
+        if self.__is_heuristics_enabled_with_no_exec():
+            report_processor.process(
+                reports.corosync_quorum_heuristics_enabled_with_no_exec()
+            )
+
         self.__update_qdevice_votes()
         self.__update_two_node()
         self.__remove_empty_sections(self.config)
 
     def update_quorum_device(
         self, report_processor, model_options, generic_options,
-        force_options=False
+        heuristics_options, force_options=False
     ):
         """
         Update existing quorum device configuration
-        model_options model specific options dict
-        generic_options generic quorum device options dict
-        force_options continue even if options are not valid
+
+        dict model_options -- model specific options
+        dict generic_options -- generic quorum device options
+        dict heuristics_options -- heuristics options
+        bool force_options -- continue even if options are not valid
         """
         # validation
         if not self.has_quorum_device():
@@ -317,21 +346,61 @@ class ConfigFacade(object):
                 generic_options,
                 force=force_options
             )
+            +
+            self.__validate_quorum_device_update_heuristics(
+                heuristics_options,
+                force_options=force_options
+            )
         )
+
         # set new configuration
         device_sections = []
         model_sections = []
+        heuristics_sections = []
+
         for quorum in self.config.get_sections("quorum"):
             device_sections.extend(quorum.get_sections("device"))
             for device in quorum.get_sections("device"):
                 model_sections.extend(device.get_sections(model))
+                heuristics_sections.extend(device.get_sections("heuristics"))
+        # we know device sections exist, otherwise the function would have
+        # exited at the has_quorum_device check above
+        if not model_sections:
+            new_model = config_parser.Section(model)
+            device_sections[-1].add_section(new_model)
+            model_sections.append(new_model)
+        if not heuristics_sections:
+            new_heuristics = config_parser.Section("heuristics")
+            device_sections[-1].add_section(new_heuristics)
+            heuristics_sections.append(new_heuristics)
+
         self.__set_section_options(device_sections, generic_options)
         self.__set_section_options(model_sections, model_options)
+        self.__set_section_options(heuristics_sections, heuristics_options)
+
+        if self.__is_heuristics_enabled_with_no_exec():
+            report_processor.process(
+                reports.corosync_quorum_heuristics_enabled_with_no_exec()
+            )
+
         self.__update_qdevice_votes()
         self.__update_two_node()
         self.__remove_empty_sections(self.config)
         self._need_qdevice_reload = True
 
+    def remove_quorum_device_heuristics(self):
+        """
+        Remove quorum device heuristics configuration
+        """
+        if not self.has_quorum_device():
+            raise LibraryError(reports.qdevice_not_defined())
+        for quorum in self.config.get_sections("quorum"):
+            for device in quorum.get_sections("device"):
+                for heuristics in device.get_sections("heuristics"):
+                    device.del_section(heuristics)
+        self.__remove_empty_sections(self.config)
+        self._need_qdevice_reload = True
+
     def remove_quorum_device(self):
         """
         Remove all quorum device configuration
@@ -355,9 +424,13 @@ class ConfigFacade(object):
                 "model",
                 model,
                 allowed_values,
-                ReportItemSeverity.WARNING if force_model
-                    else ReportItemSeverity.ERROR,
-                None if force_model else report_codes.FORCE_QDEVICE_MODEL
+                severity=(
+                    ReportItemSeverity.WARNING if force_model
+                    else ReportItemSeverity.ERROR
+                ),
+                forceable=(
+                    None if force_model else report_codes.FORCE_QDEVICE_MODEL
+                )
             ))
 
         return report_items
@@ -369,7 +442,7 @@ class ConfigFacade(object):
             return self.__validate_quorum_device_model_net_options(
                 model_options,
                 need_required,
-                force
+                force=force
             )
         return []
 
@@ -401,8 +474,8 @@ class ConfigFacade(object):
                     [name],
                     allowed_options,
                     "quorum device model",
-                    severity,
-                    forceable
+                    severity=severity,
+                    forceable=forceable
                 ))
                 continue
 
@@ -417,7 +490,11 @@ class ConfigFacade(object):
                 allowed_values = ("ffsplit", "lms")
                 if value not in allowed_values:
                     report_items.append(reports.invalid_option_value(
-                        name, value, allowed_values, severity, forceable
+                        name,
+                        value,
+                        allowed_values,
+                        severity=severity,
+                        forceable=forceable
                     ))
 
             if name == "connect_timeout":
@@ -425,14 +502,22 @@ class ConfigFacade(object):
                 if not (value.isdigit() and minimum <= int(value) <= maximum):
                     min_max = "{min}-{max}".format(min=minimum, max=maximum)
                     report_items.append(reports.invalid_option_value(
-                        name, value, min_max, severity, forceable
+                        name,
+                        value,
+                        min_max,
+                        severity=severity,
+                        forceable=forceable
                     ))
 
             if name == "force_ip_version":
                 allowed_values = ("0", "4", "6")
                 if value not in allowed_values:
                     report_items.append(reports.invalid_option_value(
-                        name, value, allowed_values, severity, forceable
+                        name,
+                        value,
+                        allowed_values,
+                        severity=severity,
+                        forceable=forceable
                     ))
 
             if name == "port":
@@ -440,7 +525,11 @@ class ConfigFacade(object):
                 if not (value.isdigit() and minimum <= int(value) <= maximum):
                     min_max = "{min}-{max}".format(min=minimum, max=maximum)
                     report_items.append(reports.invalid_option_value(
-                        name, value, min_max, severity, forceable
+                        name,
+                        value,
+                        min_max,
+                        severity=severity,
+                        forceable=forceable
                     ))
 
             if name == "tie_breaker":
@@ -449,7 +538,11 @@ class ConfigFacade(object):
                 if value not in allowed_nonid + node_ids:
                     allowed_values = allowed_nonid + ["valid node id"]
                     report_items.append(reports.invalid_option_value(
-                        name, value, allowed_values, severity, forceable
+                        name,
+                        value,
+                        allowed_values,
+                        severity=severity,
+                        forceable=forceable
                     ))
 
         if missing_options:
@@ -481,8 +574,11 @@ class ConfigFacade(object):
                     [name],
                     allowed_options,
                     "quorum device",
-                    severity if name != "model" else ReportItemSeverity.ERROR,
-                    forceable if name != "model" else None
+                    severity=(
+                        severity if name != "model"
+                        else ReportItemSeverity.ERROR
+                    ),
+                    forceable=(forceable if name != "model" else None)
                 ))
                 continue
 
@@ -491,11 +587,171 @@ class ConfigFacade(object):
 
             if not value.isdigit():
                 report_items.append(reports.invalid_option_value(
-                    name, value, "positive integer", severity, forceable
+                    name,
+                    value,
+                    "positive integer",
+                    severity=severity,
+                    forceable=forceable
                 ))
 
         return report_items
 
+    def __split_heuristics_exec_options(self, heuristics_options):
+        options_exec = dict()
+        options_nonexec = dict()
+        for name, value in heuristics_options.items():
+            if name.startswith("exec_"):
+                options_exec[name] = value
+            else:
+                options_nonexec[name] = value
+        return options_nonexec, options_exec
+
+    def __get_heuristics_options_validators(
+        self, allow_empty_values=False, force_options=False
+    ):
+        validators = {
+            "mode": validate.value_in(
+                "mode",
+                ("off", "on", "sync"),
+                code_to_allow_extra_values=report_codes.FORCE_OPTIONS,
+                allow_extra_values=force_options
+            ),
+            "interval": validate.value_positive_integer(
+                "interval",
+                code_to_allow_extra_values=report_codes.FORCE_OPTIONS,
+                allow_extra_values=force_options
+            ),
+            "sync_timeout": validate.value_positive_integer(
+                "sync_timeout",
+                code_to_allow_extra_values=report_codes.FORCE_OPTIONS,
+                allow_extra_values=force_options
+            ),
+            "timeout": validate.value_positive_integer(
+                "timeout",
+                code_to_allow_extra_values=report_codes.FORCE_OPTIONS,
+                allow_extra_values=force_options
+            ),
+        }
+        if not allow_empty_values:
+            # make sure to return a list even in python3 so we can call append
+            # on it
+            return list(validators.values())
+        return [
+            validate.value_empty_or_valid(option_name, validator)
+            for option_name, validator in validators.items()
+        ]
+
+    def __validate_heuristics_noexec_option_names(
+        self, options_nonexec, force_options=False
+    ):
+        return validate.names_in(
+            ("mode", "interval", "sync_timeout", "timeout"),
+            options_nonexec.keys(),
+            "heuristics",
+            report_codes.FORCE_OPTIONS,
+            allow_extra_names=force_options,
+            allowed_option_patterns=["exec_NAME"]
+        )
+
+    def __validate_heuristics_exec_option_names(self, options_exec):
+        # We must be strict and not allow this validation to be overridden,
+        # otherwise a crafted exec_NAME could be misused for setting
+        # arbitrary corosync.conf settings.
+        regexp = self.__QUORUM_DEVICE_HEURISTICS_EXEC_NAME_RE
+        report_list = []
+        valid_options = []
+        not_valid_options = []
+        for name in options_exec:
+            if regexp.match(name) is None:
+                not_valid_options.append(name)
+            else:
+                valid_options.append(name)
+        if not_valid_options:
+            report_list.append(
+                reports.invalid_userdefined_options(
+                    not_valid_options,
+                    "exec_NAME cannot contain '.:{}#' and whitespace characters",
+                    "heuristics",
+                    severity=ReportItemSeverity.ERROR,
+                    forceable=None
+                )
+            )
+        return report_list, valid_options
+
+    def __validate_quorum_device_add_heuristics(
+        self, heuristics_options, force_options=False
+    ):
+        report_list = []
+        options_nonexec, options_exec = self.__split_heuristics_exec_options(
+            heuristics_options
+        )
+        validators = self.__get_heuristics_options_validators(
+            force_options=force_options
+        )
+        exec_options_reports, valid_exec_options = (
+            self.__validate_heuristics_exec_option_names(options_exec)
+        )
+        for option in valid_exec_options:
+            validators.append(
+                validate.value_not_empty(option, "a command to be run")
+            )
+        report_list.extend(
+            validate.run_collection_of_option_validators(
+                heuristics_options, validators
+            )
+            +
+            self.__validate_heuristics_noexec_option_names(
+                options_nonexec, force_options=force_options
+            )
+            +
+            exec_options_reports
+        )
+        return report_list
+
+    def __validate_quorum_device_update_heuristics(
+        self, heuristics_options, force_options=False
+    ):
+        report_list = []
+        options_nonexec, options_exec = self.__split_heuristics_exec_options(
+            heuristics_options
+        )
+        validators = self.__get_heuristics_options_validators(
+            allow_empty_values=True, force_options=force_options
+        )
+        # no validation necessary for values of valid exec options - they are
+        # either empty (meaning they should be removed) or nonempty strings
+        exec_options_reports, dummy_valid_exec_options = (
+            self.__validate_heuristics_exec_option_names(options_exec)
+        )
+        report_list.extend(
+            validate.run_collection_of_option_validators(
+                heuristics_options, validators
+            )
+            +
+            self.__validate_heuristics_noexec_option_names(
+                options_nonexec, force_options=force_options
+            )
+            +
+            exec_options_reports
+        )
+        return report_list
+
+    def __is_heuristics_enabled_with_no_exec(self):
+        regexp = self.__QUORUM_DEVICE_HEURISTICS_EXEC_NAME_RE
+        mode = None
+        exec_found = False
+        for quorum in self.config.get_sections("quorum"):
+            for device in quorum.get_sections("device"):
+                for heuristics in device.get_sections("heuristics"):
+                    for name, value in heuristics.get_attributes():
+                        if name == "mode" and value:
+                            # Cannot break, must go through all modes, the last
+                            # one matters
+                            mode = value
+                        elif regexp.match(name) and value:
+                            exec_found = True
+        return not exec_found and mode in ("on", "sync")
+
     def __update_two_node(self):
         # get relevant status
         has_quorum_device = self.has_quorum_device()
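
To make the exec_ handling above easier to follow, here is a minimal standalone sketch of the same rules (an illustration only, not the pcs API): exec_ option names must match the __QUORUM_DEVICE_HEURISTICS_EXEC_NAME_RE pattern, and heuristics count as "enabled with no exec" when mode is on or sync but no non-empty exec_ command is set.

    import re

    # Standalone illustration of the checks implemented in ConfigFacade above;
    # the names and structure here are simplified, not the real pcs internals.
    EXEC_NAME_RE = re.compile(r"^exec_[^.:{}#\s]+$")

    def split_heuristics_options(options):
        # Separate exec_* entries from the generic heuristics settings.
        execs = {n: v for n, v in options.items() if n.startswith("exec_")}
        others = {n: v for n, v in options.items() if not n.startswith("exec_")}
        return others, execs

    def heuristics_enabled_with_no_exec(options):
        # Heuristics only do real work when mode is on/sync and at least one
        # valid, non-empty exec_* command is defined.
        nonexec, execs = split_heuristics_options(options)
        has_exec = any(EXEC_NAME_RE.match(n) and v for n, v in execs.items())
        return nonexec.get("mode") in ("on", "sync") and not has_exec

    print(heuristics_enabled_with_no_exec({"mode": "on"}))                        # True
    print(heuristics_enabled_with_no_exec({"mode": "on", "exec_ls": "/bin/ls"}))  # False
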
diff --git a/pcs/lib/corosync/qdevice_net.py b/pcs/lib/corosync/qdevice_net.py
index ab2379d..0550537 100644
--- a/pcs/lib/corosync/qdevice_net.py
+++ b/pcs/lib/corosync/qdevice_net.py
@@ -9,12 +9,12 @@ import os
 import os.path
 import re
 import shutil
-import tempfile
 
 from pcs import settings
 from pcs.common.tools import join_multilines
 from pcs.lib import external, reports
 from pcs.lib.errors import LibraryError
+from pcs.lib.tools import write_tmpfile
 
 
 __model = "net"
@@ -329,10 +329,7 @@ def client_import_certificate_and_key(runner, pk12_certificate):
 
 def _store_to_tmpfile(data, report_func):
     try:
-        tmpfile = tempfile.NamedTemporaryFile(mode="wb", suffix=".pcs")
-        tmpfile.write(data)
-        tmpfile.flush()
-        return tmpfile
+        return write_tmpfile(data, binary=True)
     except EnvironmentError as e:
         raise LibraryError(report_func(e.strerror))
 
diff --git a/pcs/lib/pacemaker/live.py b/pcs/lib/pacemaker/live.py
index 6ef5b29..8e21a96 100644
--- a/pcs/lib/pacemaker/live.py
+++ b/pcs/lib/pacemaker/live.py
@@ -23,7 +23,7 @@ from pcs.lib.xml_tools import etree_to_str
 __EXITCODE_WAIT_TIMEOUT = 62
 __EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT = 6
 __EXITCODE_CIB_SCHEMA_IS_THE_LATEST_AVAILABLE = 211
-__RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD = 100
+__RESOURCE_REFRESH_OPERATION_COUNT_THRESHOLD = 100
 
 class CrmMonErrorException(LibraryError):
     pass
@@ -71,8 +71,8 @@ def parse_cib_xml(xml):
 def get_cib(xml):
     try:
         return parse_cib_xml(xml)
-    except (etree.XMLSyntaxError, etree.DocumentInvalid):
-        raise LibraryError(reports.cib_load_error_invalid_format())
+    except (etree.XMLSyntaxError, etree.DocumentInvalid) as e:
+        raise LibraryError(reports.cib_load_error_invalid_format(str(e)))
 
 def verify(runner, verbose=False):
     crm_verify_cmd = [__exec("crm_verify")]
@@ -313,28 +313,50 @@ def remove_node(runner, node_name):
 
 ### resources
 
-def resource_cleanup(runner, resource=None, node=None, force=False):
+def resource_cleanup(runner, resource=None, node=None):
+    cmd = [__exec("crm_resource"), "--cleanup"]
+    if resource:
+        cmd.extend(["--resource", resource])
+    if node:
+        cmd.extend(["--node", node])
+
+    stdout, stderr, retval = runner.run(cmd)
+
+    if retval != 0:
+        raise LibraryError(
+            reports.resource_cleanup_error(
+                join_multilines([stderr, stdout]),
+                resource,
+                node
+            )
+        )
+    # useful output (what has been done) goes to stderr
+    return join_multilines([stdout, stderr])
+
+def resource_refresh(runner, resource=None, node=None, full=False, force=None):
     if not force and not node and not resource:
         summary = ClusterState(get_cluster_status_xml(runner)).summary
         operations = summary.nodes.attrs.count * summary.resources.attrs.count
-        if operations > __RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD:
+        if operations > __RESOURCE_REFRESH_OPERATION_COUNT_THRESHOLD:
             raise LibraryError(
-                reports.resource_cleanup_too_time_consuming(
-                    __RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD
+                reports.resource_refresh_too_time_consuming(
+                    __RESOURCE_REFRESH_OPERATION_COUNT_THRESHOLD
                 )
             )
 
-    cmd = [__exec("crm_resource"), "--cleanup"]
+    cmd = [__exec("crm_resource"), "--refresh"]
     if resource:
         cmd.extend(["--resource", resource])
     if node:
         cmd.extend(["--node", node])
+    if full:
+        cmd.extend(["--force"])
 
     stdout, stderr, retval = runner.run(cmd)
 
     if retval != 0:
         raise LibraryError(
-            reports.resource_cleanup_error(
+            reports.resource_refresh_error(
                 join_multilines([stderr, stdout]),
                 resource,
                 node
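
resource_refresh keeps the safety valve formerly attached to cleanup: without force and without a resource or node filter, the amount of work is estimated as nodes × resources and the call is refused above the threshold. A rough standalone illustration of that check and of the crm_resource command assembled above (the helper names here are made up for the example):

    RESOURCE_REFRESH_OPERATION_COUNT_THRESHOLD = 100

    def refresh_too_time_consuming(node_count, resource_count, force=False):
        # A cluster-wide refresh is refused when it would touch too many
        # node/resource combinations, unless the caller forces it.
        if force:
            return False
        return node_count * resource_count > RESOURCE_REFRESH_OPERATION_COUNT_THRESHOLD

    def build_refresh_cmd(resource=None, node=None, full=False):
        # Mirrors the crm_resource invocation built in resource_refresh above.
        cmd = ["crm_resource", "--refresh"]
        if resource:
            cmd.extend(["--resource", resource])
        if node:
            cmd.extend(["--node", node])
        if full:
            cmd.extend(["--force"])
        return cmd

    print(refresh_too_time_consuming(11, 10))  # True: 110 > 100
    print(build_refresh_cmd(resource="r1", full=True))
    # ['crm_resource', '--refresh', '--resource', 'r1', '--force']
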
diff --git a/pcs/lib/pacemaker/test/test_live.py b/pcs/lib/pacemaker/test/test_live.py
index bb731c0..a5a2eb7 100644
--- a/pcs/lib/pacemaker/test/test_live.py
+++ b/pcs/lib/pacemaker/test/test_live.py
@@ -12,6 +12,8 @@ from pcs.test.tools.assertions import (
     assert_xml_equal,
     start_tag_error_text,
 )
+from pcs.test.tools import fixture
+from pcs.test.tools.command_env import get_env_tools
 from pcs.test.tools.misc import get_test_resource as rc
 from pcs.test.tools.pcs_unittest import TestCase, mock
 from pcs.test.tools.xml import XmlManipulation
@@ -666,7 +668,91 @@ class RemoveNode(LibraryPacemakerTest):
             )
         )
 
-class ResourceCleanupTest(LibraryPacemakerTest):
+
+class ResourceCleanupTest(TestCase):
+    def setUp(self):
+        self.stdout = "expected output"
+        self.stderr = "expected stderr"
+        self.resource = "my_resource"
+        self.node = "my_node"
+        self.env_assist, self.config = get_env_tools(test_case=self)
+
+    def assert_output(self, real_output):
+        self.assertEqual(
+            self.stdout + "\n" + self.stderr,
+            real_output
+        )
+
+    def test_basic(self):
+        self.config.runner.pcmk.resource_cleanup(
+            stdout=self.stdout,
+            stderr=self.stderr
+        )
+        env = self.env_assist.get_env()
+        real_output = lib.resource_cleanup(env.cmd_runner())
+        self.assert_output(real_output)
+
+    def test_resource(self):
+        self.config.runner.pcmk.resource_cleanup(
+            stdout=self.stdout,
+            stderr=self.stderr,
+            resource=self.resource
+        )
+        env = self.env_assist.get_env()
+        real_output = lib.resource_cleanup(
+            env.cmd_runner(), resource=self.resource
+        )
+        self.assert_output(real_output)
+
+    def test_node(self):
+        self.config.runner.pcmk.resource_cleanup(
+            stdout=self.stdout,
+            stderr=self.stderr,
+            node=self.node
+        )
+
+        env = self.env_assist.get_env()
+        real_output = lib.resource_cleanup(
+            env.cmd_runner(), node=self.node
+        )
+        self.assert_output(real_output)
+
+    def test_all_options(self):
+        self.config.runner.pcmk.resource_cleanup(
+            stdout=self.stdout,
+            stderr=self.stderr,
+            resource=self.resource,
+            node=self.node
+        )
+
+        env = self.env_assist.get_env()
+        real_output = lib.resource_cleanup(
+            env.cmd_runner(), resource=self.resource, node=self.node
+        )
+        self.assert_output(real_output)
+
+    def test_error_cleanup(self):
+        self.config.runner.pcmk.resource_cleanup(
+            stdout=self.stdout,
+            stderr=self.stderr,
+            returncode=1
+        )
+
+        env = self.env_assist.get_env()
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.resource_cleanup(env.cmd_runner()),
+            [
+                fixture.error(
+                    report_codes.RESOURCE_CLEANUP_ERROR,
+                    force_code=None,
+                    reason=(self.stderr + "\n" + self.stdout)
+                )
+            ],
+            expected_in_processor=False
+        )
+
+
+class ResourceRefreshTest(LibraryPacemakerTest):
     def fixture_status_xml(self, nodes, resources):
         xml_man = XmlManipulation.from_file(rc("crm_mon.minimal.xml"))
         doc = xml_man.tree.getroottree()
@@ -680,7 +766,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
         call_list = [
             mock.call(self.crm_mon_cmd()),
-            mock.call([self.path("crm_resource"), "--cleanup"]),
+            mock.call([self.path("crm_resource"), "--refresh"]),
         ]
         return_value_list = [
             (self.fixture_status_xml(1, 1), "", 0),
@@ -688,7 +774,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         ]
         mock_runner.run.side_effect = return_value_list
 
-        real_output = lib.resource_cleanup(mock_runner)
+        real_output = lib.resource_refresh(mock_runner)
 
         self.assertEqual(len(return_value_list), len(call_list))
         self.assertEqual(len(return_value_list), mock_runner.run.call_count)
@@ -706,10 +792,10 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         )
 
         assert_raise_library_error(
-            lambda: lib.resource_cleanup(mock_runner),
+            lambda: lib.resource_refresh(mock_runner),
             (
                 Severity.ERROR,
-                report_codes.RESOURCE_CLEANUP_TOO_TIME_CONSUMING,
+                report_codes.RESOURCE_REFRESH_TOO_TIME_CONSUMING,
                 {"threshold": 100},
                 report_codes.FORCE_LOAD_THRESHOLD
             )
@@ -717,15 +803,15 @@ class ResourceCleanupTest(LibraryPacemakerTest):
 
         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
 
-    def test_forced(self):
+    def test_threshold_exceeded_forced(self):
         expected_stdout = "expected output"
         expected_stderr = "expected stderr"
         mock_runner = get_runner(expected_stdout, expected_stderr, 0)
 
-        real_output = lib.resource_cleanup(mock_runner, force=True)
+        real_output = lib.resource_refresh(mock_runner, force=True)
 
         mock_runner.run.assert_called_once_with(
-            [self.path("crm_resource"), "--cleanup"]
+            [self.path("crm_resource"), "--refresh"]
         )
         self.assertEqual(
             expected_stdout + "\n" + expected_stderr,
@@ -738,10 +824,10 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         expected_stderr = "expected stderr"
         mock_runner = get_runner(expected_stdout, expected_stderr, 0)
 
-        real_output = lib.resource_cleanup(mock_runner, resource=resource)
+        real_output = lib.resource_refresh(mock_runner, resource=resource)
 
         mock_runner.run.assert_called_once_with(
-            [self.path("crm_resource"), "--cleanup", "--resource", resource]
+            [self.path("crm_resource"), "--refresh", "--resource", resource]
         )
         self.assertEqual(
             expected_stdout + "\n" + expected_stderr,
@@ -754,31 +840,55 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         expected_stderr = "expected stderr"
         mock_runner = get_runner(expected_stdout, expected_stderr, 0)
 
-        real_output = lib.resource_cleanup(mock_runner, node=node)
+        real_output = lib.resource_refresh(mock_runner, node=node)
 
         mock_runner.run.assert_called_once_with(
-            [self.path("crm_resource"), "--cleanup", "--node", node]
+            [self.path("crm_resource"), "--refresh", "--node", node]
         )
         self.assertEqual(
             expected_stdout + "\n" + expected_stderr,
             real_output
         )
 
-    def test_node_and_resource(self):
+    def test_full(self):
+        expected_stdout = "expected output"
+        expected_stderr = "expected stderr"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [
+            mock.call(self.crm_mon_cmd()),
+            mock.call([self.path("crm_resource"), "--refresh", "--force"]),
+        ]
+        return_value_list = [
+            (self.fixture_status_xml(1, 1), "", 0),
+            (expected_stdout, expected_stderr, 0),
+        ]
+        mock_runner.run.side_effect = return_value_list
+
+        real_output = lib.resource_refresh(mock_runner, full=True)
+
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+        self.assertEqual(
+            expected_stdout + "\n" + expected_stderr,
+            real_output
+        )
+
+    def test_all_options(self):
         node = "test_node"
         resource = "test_resource"
         expected_stdout = "expected output"
         expected_stderr = "expected stderr"
         mock_runner = get_runner(expected_stdout, expected_stderr, 0)
 
-        real_output = lib.resource_cleanup(
-            mock_runner, resource=resource, node=node
+        real_output = lib.resource_refresh(
+            mock_runner, resource=resource, node=node, full=True
         )
 
         mock_runner.run.assert_called_once_with(
             [
                 self.path("crm_resource"),
-                "--cleanup", "--resource", resource, "--node", node
+                "--refresh", "--resource", resource, "--node", node, "--force"
             ]
         )
         self.assertEqual(
@@ -797,7 +907,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         )
 
         assert_raise_library_error(
-            lambda: lib.resource_cleanup(mock_runner),
+            lambda: lib.resource_refresh(mock_runner),
             (
                 Severity.ERROR,
                 report_codes.CRM_MON_ERROR,
@@ -809,14 +919,14 @@ class ResourceCleanupTest(LibraryPacemakerTest):
 
         mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
 
-    def test_error_cleanup(self):
+    def test_error_refresh(self):
         expected_stdout = "some info"
         expected_stderr = "some error"
         expected_retval = 1
         mock_runner = mock.MagicMock(spec_set=CommandRunner)
         call_list = [
             mock.call(self.crm_mon_cmd()),
-            mock.call([self.path("crm_resource"), "--cleanup"]),
+            mock.call([self.path("crm_resource"), "--refresh"]),
         ]
         return_value_list = [
             (self.fixture_status_xml(1, 1), "", 0),
@@ -825,10 +935,10 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         mock_runner.run.side_effect = return_value_list
 
         assert_raise_library_error(
-            lambda: lib.resource_cleanup(mock_runner),
+            lambda: lib.resource_refresh(mock_runner),
             (
                 Severity.ERROR,
-                report_codes.RESOURCE_CLEANUP_ERROR,
+                report_codes.RESOURCE_REFRESH_ERROR,
                 {
                     "reason": expected_stderr + "\n" + expected_stdout,
                 }
@@ -839,6 +949,7 @@ class ResourceCleanupTest(LibraryPacemakerTest):
         self.assertEqual(len(return_value_list), mock_runner.run.call_count)
         mock_runner.run.assert_has_calls(call_list)
 
+
 class ResourcesWaitingTest(LibraryPacemakerTest):
     def test_has_support(self):
         expected_stdout = ""
diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
index 0085b44..fcb42f1 100644
--- a/pcs/lib/reports.py
+++ b/pcs/lib/reports.py
@@ -219,18 +219,19 @@ def required_option_of_alternatives_is_missing(
     )
 
 def invalid_option(
-    option_names, allowed_options, option_type,
+    option_names, allowed_options, option_type, allowed_option_patterns=None,
     severity=ReportItemSeverity.ERROR, forceable=None
 ):
     """
-    specified option name is not valid, usualy an error or a warning
-    list option_names specified invalid option names
-    allowed_options iterable of possible allowed option names
-    option_type decsribes the option
-    severity report item severity
-    forceable is this report item forceable? by what cathegory?
-    """
+    specified option names are not valid, usually an error or a warning
 
+    list option_names -- specified invalid option names
+    list allowed_options -- possible allowed option names
+    string option_type -- describes the option
+    list allowed_option_patterns -- allowed user-defined option name patterns
+    string severity -- report item severity
+    mixed forceable -- is this report item forceable? by what category?
+    """
     return ReportItem(
         report_codes.INVALID_OPTION,
         severity,
@@ -239,6 +240,37 @@ def invalid_option(
             "option_names": option_names,
             "option_type": option_type,
             "allowed": sorted(allowed_options),
+            "allowed_patterns": sorted(allowed_option_patterns or []),
+        }
+    )
+
+def invalid_userdefined_options(
+    option_names, allowed_description, option_type,
+    severity=ReportItemSeverity.ERROR, forceable=None
+):
+    """
+    specified option names defined by a user are not valid
+
+    This is different from invalid_option. In this case, the options are
+    supposed to be defined by a user. This report carries the information that
+    the option names do not meet the requirements, i.e. they contain characters
+    which are not allowed. invalid_option is used when the options are
+    predefined by pcs (or by the underlying tools).
+
+    list option_names -- specified invalid option names
+    string allowed_description -- describes what option names should look like
+    string option_type -- describes the option
+    string severity -- report item severity
+    mixed forceable -- is this report item forceable? by what category?
+    """
+    return ReportItem(
+        report_codes.INVALID_USERDEFINED_OPTIONS,
+        severity,
+        forceable,
+        info={
+            "option_names": sorted(option_names),
+            "option_type": option_type,
+            "allowed_description": allowed_description,
         }
     )
 
@@ -800,6 +832,14 @@ def corosync_quorum_get_status_error(reason):
         }
     )
 
+def corosync_quorum_heuristics_enabled_with_no_exec():
+    """
+    no exec_ option is specified, therefore heuristics are effectively disabled
+    """
+    return ReportItem.warning(
+        report_codes.COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC
+    )
+
 def corosync_quorum_set_expected_votes_error(reason):
     """
     unable to set expcted votes in a live cluster
@@ -1299,12 +1339,15 @@ def cib_load_error_scope_missing(scope, reason):
         }
     )
 
-def cib_load_error_invalid_format():
+def cib_load_error_invalid_format(reason):
     """
     cib does not conform to the schema
     """
     return ReportItem.error(
         report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
+        info={
+            "reason": reason,
+        }
     )
 
 def cib_missing_mandatory_section(section_name):
@@ -1423,10 +1466,11 @@ def wait_for_idle_not_live_cluster():
 
 def resource_cleanup_error(reason, resource=None, node=None):
     """
-    an error occured when deleting resource history in pacemaker
-    string reason error description
-    string resource resource which has been cleaned up
-    string node node which has been cleaned up
+    An error occurred when deleting failed resource operations in pacemaker
+
+    string reason -- error description
+    string resource -- resource which has been cleaned up
+    string node -- node which has been cleaned up
     """
     return ReportItem.error(
         report_codes.RESOURCE_CLEANUP_ERROR,
@@ -1437,13 +1481,31 @@ def resource_cleanup_error(reason, resource=None, node=None):
         }
     )
 
-def resource_cleanup_too_time_consuming(threshold):
+def resource_refresh_error(reason, resource=None, node=None):
+    """
+    An error occurred when deleting resource history in pacemaker
+
+    string reason -- error description
+    string resource -- resource which has been refreshed
+    string node -- node which has been refreshed
+    """
+    return ReportItem.error(
+        report_codes.RESOURCE_REFRESH_ERROR,
+        info={
+            "reason": reason,
+            "resource": resource,
+            "node": node,
+        }
+    )
+
+def resource_refresh_too_time_consuming(threshold):
     """
-    resource cleanup will execute more than threshold operations in a cluster
-    threshold current threshold for trigerring this error
+    Resource refresh would execute more than threshold operations in a cluster
+
+    int threshold -- current threshold for triggering this error
     """
     return ReportItem.error(
-        report_codes.RESOURCE_CLEANUP_TOO_TIME_CONSUMING,
+        report_codes.RESOURCE_REFRESH_TOO_TIME_CONSUMING,
         info={"threshold": threshold},
         forceable=report_codes.FORCE_LOAD_THRESHOLD
     )
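
invalid_option now carries an allowed_patterns list next to allowed, and the new invalid_userdefined_options report carries a free-form allowed_description instead. A hypothetical consumer-side sketch of how such a report item's info could be rendered (this is not pcs's console_report code):

    def format_invalid_option(info):
        # Hypothetical formatter: merge the fixed option names with pattern
        # hints such as "exec_NAME" coming from allowed_patterns.
        allowed = list(info["allowed"]) + list(info.get("allowed_patterns", []))
        return "invalid {type} option(s): {names}, allowed are: {allowed}".format(
            type=info["option_type"],
            names=", ".join(info["option_names"]),
            allowed=", ".join(allowed),
        )

    print(format_invalid_option({
        "option_names": ["extra"],
        "option_type": "heuristics",
        "allowed": ["interval", "mode", "sync_timeout", "timeout"],
        "allowed_patterns": ["exec_NAME"],
    }))
    # invalid heuristics option(s): extra, allowed are: interval, mode,
    # sync_timeout, timeout, exec_NAME
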
diff --git a/pcs/lib/test/test_env.py b/pcs/lib/test/test_env.py
index 23915fb..5660cf7 100644
--- a/pcs/lib/test/test_env.py
+++ b/pcs/lib/test/test_env.py
@@ -853,12 +853,14 @@ class PushCorosyncConfFile(TestCase):
         self.config.env.set_corosync_conf_data("totem {\n    version: 2\n}\n")
 
     def test_success(self):
-        env = self.env_assistant.get_env()
         new_corosync_conf_data = "totem {\n    version: 3\n}\n"
+        self.config.env.push_corosync_conf(
+            corosync_conf_text=new_corosync_conf_data
+        )
+        env = self.env_assistant.get_env()
         env.push_corosync_conf(
             CorosyncConfigFacade.from_string(new_corosync_conf_data)
         )
-        self.assertEqual(new_corosync_conf_data, env.get_corosync_conf_data())
 
 
 class GetCorosyncConfFile(TestCase):
diff --git a/pcs/lib/test/test_resource_agent.py b/pcs/lib/test/test_resource_agent.py
index 4da4467..a8be5fc 100644
--- a/pcs/lib/test/test_resource_agent.py
+++ b/pcs/lib/test/test_resource_agent.py
@@ -1338,7 +1338,8 @@ class AgentMetadataValidateParameters(TestCase):
                             "another_required_param",
                             "required_param",
                             "test_param",
-                        ]
+                        ],
+                        "allowed_patterns": [],
                     },
                     report_codes.FORCE_OPTIONS
                 ),
@@ -1445,7 +1446,8 @@ class AgentMetadataValidateParameters(TestCase):
                         "option_type": "resource",
                         "allowed": [
                             "deprecated",
-                        ]
+                        ],
+                        "allowed_patterns": [],
                     },
                     report_codes.FORCE_OPTIONS
                 ),
diff --git a/pcs/lib/test/test_validate.py b/pcs/lib/test/test_validate.py
index 37848a2..3aea4eb 100644
--- a/pcs/lib/test/test_validate.py
+++ b/pcs/lib/test/test_validate.py
@@ -843,6 +843,29 @@ class NamesIn(TestCase):
                         "option_names": ["x", "y"],
                         "allowed": ["a", "b", "c"],
                         "option_type": "option",
+                        "allowed_patterns": [],
+                    },
+                    None
+                )
+            ]
+        )
+
+    def test_return_error_with_allowed_patterns(self):
+        assert_report_item_list_equal(
+            validate.names_in(
+                ["a", "b", "c"],
+                ["x", "y"],
+                allowed_option_patterns=["pattern"]
+            ),
+            [
+                (
+                    severities.ERROR,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_names": ["x", "y"],
+                        "allowed": ["a", "b", "c"],
+                        "option_type": "option",
+                        "allowed_patterns": ["pattern"],
                     },
                     None
                 )
@@ -865,6 +888,7 @@ class NamesIn(TestCase):
                         "option_names": ["x", "y"],
                         "allowed": ["a", "b", "c"],
                         "option_type": "option",
+                        "allowed_patterns": [],
                     },
                     None
                 )
@@ -887,6 +911,7 @@ class NamesIn(TestCase):
                         "option_names": ["x", "y"],
                         "allowed": ["a", "b", "c"],
                         "option_type": "some option",
+                        "allowed_patterns": [],
                     },
                     "FORCE_CODE"
                 )
@@ -910,6 +935,7 @@ class NamesIn(TestCase):
                         "option_names": ["x", "y"],
                         "allowed": ["a", "b", "c"],
                         "option_type": "some option",
+                        "allowed_patterns": [],
                     },
                     None
                 )
diff --git a/pcs/lib/validate.py b/pcs/lib/validate.py
index c316ea8..14a5bd4 100644
--- a/pcs/lib/validate.py
+++ b/pcs/lib/validate.py
@@ -200,7 +200,8 @@ def mutually_exclusive(mutually_exclusive_names, option_type="option"):
 
 def names_in(
     allowed_name_list, name_list, option_type="option",
-    code_to_allow_extra_names=None, allow_extra_names=False
+    code_to_allow_extra_names=None, allow_extra_names=False,
+    allowed_option_patterns=None
 ):
     """
     Return a list with report INVALID_OPTION when in name_list is a name that is
@@ -215,6 +216,7 @@ def names_in(
     bool allow_extra_names is flag that complements code_to_allow_extra_names
         and determines wheter is report INVALID_OPTION forceable error or
         warning.
+    mixed allowed_option_patterns -- option patterns to be added to a report
     """
     invalid_names = set(name_list) - set(allowed_name_list)
     if not invalid_names:
@@ -229,6 +231,7 @@ def names_in(
         sorted(invalid_names),
         sorted(allowed_name_list),
         option_type,
+        allowed_option_patterns=sorted(allowed_option_patterns or [])
     )]
 
 ### values validators
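
A usage sketch of the extended names_in validator, based only on the signature above and on the expected report info shown in the tests (illustrative values):

    from pcs.lib.validate import names_in

    # "bogus" is not among the allowed names, so one INVALID_OPTION report
    # item is returned; its info dict now also carries
    # "allowed_patterns": ["exec_NAME"].
    report_items = names_in(
        ("interval", "mode", "sync_timeout", "timeout"),  # allowed names
        ["mode", "bogus"],                                # names from the user
        option_type="heuristics",
        allowed_option_patterns=["exec_NAME"],
    )
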
diff --git a/pcs/pcs b/pcs/pcs
index 4585fd5..736f9cd 100755
--- a/pcs/pcs
+++ b/pcs/pcs
@@ -7,6 +7,10 @@ import sys
 PACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.insert(0, PACKAGE_DIR)
 
-from pcs import app
+from pcs import (
+    app,
+    settings,
+)
 
+settings.pcsd_exec_location = os.path.join(PACKAGE_DIR, "pcsd")
 app.main(sys.argv[1:])
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 6c47103..15454c7 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "November 2017" "pcs 0.9.161" "System Administration Utilities"
+.TH PCS "8" "November 2017" "pcs 0.9.162" "System Administration Utilities"
 .SH NAME
 pcs \- pacemaker/corosync configuration system
 .SH SYNOPSIS
@@ -178,7 +178,10 @@ defaults [options]
 Set default values for resources, if no options are passed, lists currently configured defaults. Defaults do not apply to resources which override them with their own defined values.
 .TP
 cleanup [<resource id>] [\fB\-\-node\fR <node>]
-Make the cluster forget the operation history of the resource and re\-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a resource id is not specified then all resources/stonith devices will be cleaned up. If a node is not specified then resources/stonith devices on all nodes will be cleaned up.
+Make the cluster forget failed operations from the history of the resource and re\-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a resource id is not specified then all resources / stonith devices will be cleaned up. If a node is not specified then resources / stonith devices on all nodes will be cleaned up.
+.TP
+refresh [<resource id>] [\fB\-\-node\fR <node>] [\fB\-\-full\fR]
+Make the cluster forget the complete operation history (including failures) of the resource and re\-detect its current state. If you are interested in forgetting failed operations only, use the 'pcs resource cleanup' command. If a resource id is not specified then all resources / stonith devices will be refreshed. If a node is not specified then resources / stonith devices on all nodes will be refreshed. Use \fB\-\-full\fR to refresh a resource on all nodes, otherwise only nodes where th [...]
 .TP
 failcount show <resource id> [node]
 Show current failcount for specified resource from all nodes or only on specified node.
@@ -356,7 +359,10 @@ disable <stonith id> [\fB\-\-wait[=n]\fR]
 Attempt to stop the stonith device if it is running and disallow the cluster to use it. If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the stonith device to stop and then return 0 if the stonith device is stopped or 1 if the stonith device has not stopped. If 'n' is not specified it defaults to 60 minutes.
 .TP
 cleanup [<stonith id>] [\fB\-\-node\fR <node>]
-Make the cluster forget the operation history of the stonith device and re\-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a stonith id is not specified then all resources/stonith devices will be cleaned up. If a node is not specified then resources/stonith devices on all nodes will be cleaned up.
+Make the cluster forget failed operations from the history of the stonith device and re\-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a stonith id is not specified then all resources / stonith devices will be cleaned up. If a node is not specified then resources / stonith devices on all nodes will be cleaned up.
+.TP
+refresh [<stonith id>] [\fB\-\-node\fR <node>] [\fB\-\-full\fR]
+Make the cluster forget the complete operation history (including failures) of the stonith device and re\-detect its current state. If you are interested in forgetting failed operations only, use the 'pcs stonith cleanup' command. If a stonith id is not specified then all resources / stonith devices will be refreshed. If a node is not specified then resources / stonith devices on all nodes will be refreshed. Use \fB\-\-full\fR to refresh a stonith device on all nodes, otherwise only node [...]
 .TP
 level [config]
 Lists all of the fencing levels currently configured.
@@ -600,8 +606,11 @@ Show quorum configuration.
 status
 Show quorum runtime status.
 .TP
-device add [<generic options>] model <device model> [<model options>]
-Add a quorum device to the cluster. Quorum device needs to be created first by "pcs qdevice setup" command. It is not possible to use more than one quorum device in a cluster simultaneously. Generic options, model and model options are all documented in corosync\-qdevice(8) man page.
+device add [<generic options>] model <device model> [<model options>] [heuristics <heuristics options>]
+Add a quorum device to the cluster. The quorum device needs to be created first by the "pcs qdevice setup" command. It is not possible to use more than one quorum device in a cluster simultaneously. Models and options are all documented in the corosync\-qdevice(8) man page; for heuristics options check the quorum.device.heuristics subkey section, for model options check the quorum.device.<device model> subkey sections.
+.TP
+device heuristics remove
+Remove all heuristics settings of the configured quorum device.
 .TP
 device remove
 Remove a quorum device from the cluster.
@@ -609,8 +618,8 @@ Remove a quorum device from the cluster.
 device status [\fB\-\-full\fR]
 Show quorum device runtime status.  Using \fB\-\-full\fR will give more detailed output.
 .TP
-device update [<generic options>] [model <model options>]
-Add/Change quorum device options.  Generic options and model options are all documented in corosync\-qdevice(8) man page. Requires the cluster to be stopped.
+device update [<generic options>] [model <model options>] [heuristics <heuristics options>]
+Add/Change quorum device options. Requires the cluster to be stopped. The model and options are all documented in the corosync\-qdevice(8) man page; for heuristics options check the quorum.device.heuristics subkey section, for model options check the quorum.device.<device model> subkey sections.
 
 WARNING: If you want to change "host" option of qdevice model net, use "pcs quorum device remove" and "pcs quorum device add" commands to set up configuration properly unless old and new host is the same machine.
 .TP
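
For orientation, hypothetical invocations matching the synopsis above (host name, command and option values are placeholders, not recommendations):

    pcs quorum device add model net host=qnetd.example.com algorithm=ffsplit \
        heuristics mode=on exec_ping="/usr/bin/ping -c 1 192.0.2.1"
    pcs quorum device update heuristics interval=30
    pcs quorum device heuristics remove
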
@@ -825,6 +834,7 @@ no_proxy, https_proxy, all_proxy, NO_PROXY, HTTPS_PROXY, ALL_PROXY
 http://clusterlabs.org/doc/
 
 .BR pcsd (8)
+.BR pcs_snmp_agent (8)
 
 .BR corosync_overview (8),
 .BR votequorum (5),
diff --git a/pcs/quorum.py b/pcs/quorum.py
index 51469d6..10c2760 100644
--- a/pcs/quorum.py
+++ b/pcs/quorum.py
@@ -53,6 +53,8 @@ def quorum_device_cmd(lib, argv, modificators):
     try:
         if sub_cmd == "add":
             quorum_device_add_cmd(lib, argv_next, modificators)
+        elif sub_cmd == "heuristics":
+            quorum_device_heuristics_cmd(lib, argv_next, modificators)
         elif sub_cmd == "remove":
             quorum_device_remove_cmd(lib, argv_next, modificators)
         elif sub_cmd == "status":
@@ -67,6 +69,23 @@ def quorum_device_cmd(lib, argv, modificators):
             e, "quorum", "device {0}".format(sub_cmd)
         )
 
+def quorum_device_heuristics_cmd(lib, argv, modifiers):
+    if len(argv) < 1:
+        raise CmdLineInputError()
+
+    sub_cmd, argv_next = argv[0], argv[1:]
+    try:
+        if sub_cmd == "remove":
+            quorum_device_heuristics_remove_cmd(lib, argv_next, modifiers)
+        else:
+            sub_cmd = ""
+            raise CmdLineInputError()
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(
+            e, "quorum", "device heuristics {0}".format(sub_cmd)
+        )
+
+
 def quorum_config_cmd(lib, argv, modificators):
     if argv:
         raise CmdLineInputError()
@@ -91,6 +110,7 @@ def quorum_config_to_str(config):
                 config["device"].get("generic_options", {}).items()
             )
         ]))
+
         model_settings = [
             "Model: {m}".format(m=config["device"].get("model", ""))
         ]
@@ -102,6 +122,15 @@ def quorum_config_to_str(config):
         ]))
         lines.extend(indent(model_settings))
 
+        heuristics_options = config["device"].get("heuristics_options", {})
+        if heuristics_options:
+            heuristics_settings = ["Heuristics:"]
+            heuristics_settings.extend(indent([
+                "{n}: {v}".format(n=name, v=value)
+                for name, value in sorted(heuristics_options.items())
+            ]))
+            lines.extend(indent(heuristics_settings))
+
     return lines
 
 def quorum_expected_votes_cmd(lib, argv, modificators):
@@ -125,27 +154,46 @@ def quorum_update_cmd(lib, argv, modificators):
         force=modificators["force"]
     )
 
+def _parse_quorum_device_groups(arg_list):
+    keyword_list = ["model", "heuristics"]
+    groups = parse_args.group_by_keywords(
+        arg_list,
+        set(keyword_list),
+        implicit_first_group_key="generic",
+        keyword_repeat_allowed=False,
+        only_found_keywords=True
+    )
+    for keyword in keyword_list:
+        if keyword not in groups:
+            continue
+        if len(groups[keyword]) == 0:
+            raise CmdLineInputError(
+                "No {0} options specified".format(keyword)
+            )
+    return groups
+
 def quorum_device_add_cmd(lib, argv, modificators):
+    groups = _parse_quorum_device_groups(argv)
+    model_and_model_options = groups.get("model", [])
     # we expect "model" keyword once, followed by the actual model value
-    options_lists = parse_args.split_list(argv, "model")
-    if len(options_lists) != 2:
+    if not model_and_model_options or "=" in model_and_model_options[0]:
         raise CmdLineInputError()
-    # check if model value was specified
-    if not options_lists[1] or "=" in options_lists[1][0]:
-        raise CmdLineInputError()
-    generic_options = parse_args.prepare_options(options_lists[0])
-    model = options_lists[1][0]
-    model_options = parse_args.prepare_options(options_lists[1][1:])
+
+    generic_options = parse_args.prepare_options(groups.get("generic", []))
+    model = model_and_model_options[0]
+    model_options = parse_args.prepare_options(model_and_model_options[1:])
+    heuristics_options = parse_args.prepare_options(
+        groups.get("heuristics", [])
+    )
 
     if "model" in generic_options:
-        raise CmdLineInputError(
-            "Model cannot be specified in generic options"
-        )
+        raise CmdLineInputError("Model cannot be specified in generic options")
 
     lib.quorum.add_device(
         model,
         model_options,
         generic_options,
+        heuristics_options,
         force_model=modificators["force"],
         force_options=modificators["force"],
         skip_offline_nodes=modificators["skip_offline_nodes"]
@@ -165,29 +213,31 @@ def quorum_device_status_cmd(lib, argv, modificators):
     print(lib.quorum.status_device(modificators["full"]))
 
 def quorum_device_update_cmd(lib, argv, modificators):
-    # we expect "model" keyword once
-    options_lists = parse_args.split_list(argv, "model")
-    if len(options_lists) == 1:
-        generic_options = parse_args.prepare_options(options_lists[0])
-        model_options = dict()
-    elif len(options_lists) == 2:
-        generic_options = parse_args.prepare_options(options_lists[0])
-        model_options = parse_args.prepare_options(options_lists[1])
-    else:
+    groups = _parse_quorum_device_groups(argv)
+    if not groups:
         raise CmdLineInputError()
+    generic_options = parse_args.prepare_options(groups.get("generic", []))
+    model_options = parse_args.prepare_options(groups.get("model", []))
+    heuristics_options = parse_args.prepare_options(
+        groups.get("heuristics", [])
+    )
 
     if "model" in generic_options:
-        raise CmdLineInputError(
-            "Model cannot be specified in generic options"
-        )
+        raise CmdLineInputError("Model cannot be specified in generic options")
 
     lib.quorum.update_device(
         model_options,
         generic_options,
+        heuristics_options,
         force_options=modificators["force"],
         skip_offline_nodes=modificators["skip_offline_nodes"]
     )
 
+def quorum_device_heuristics_remove_cmd(lib, argv, modifiers):
+    if argv:
+        raise CmdLineInputError()
+    lib.quorum.remove_device_heuristics()
+
 # TODO switch to new architecture, move to lib
 def quorum_unblock_cmd(argv):
     if len(argv) > 0:
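
_parse_quorum_device_groups above splits the argv of "pcs quorum device add/update" into generic, model and heuristics groups via parse_args.group_by_keywords. A simplified standalone illustration of that grouping (not the real pcs helper; the argv values are placeholders):

    def group_quorum_device_args(argv, keywords=("model", "heuristics")):
        # Everything before the first keyword goes to the "generic" group;
        # each keyword then starts a new group collecting what follows it.
        groups = {"generic": []}
        current = "generic"
        for arg in argv:
            if arg in keywords:
                current = arg
                groups[current] = []
            else:
                groups[current].append(arg)
        return groups

    argv = [
        "timeout=20",
        "model", "net", "host=qnetd.example.com", "algorithm=ffsplit",
        "heuristics", "mode=on", "exec_ping=/usr/bin/ping -c 1 192.0.2.1",
    ]
    print(group_quorum_device_args(argv))
    # {'generic': ['timeout=20'],
    #  'model': ['net', 'host=qnetd.example.com', 'algorithm=ffsplit'],
    #  'heuristics': ['mode=on', 'exec_ping=/usr/bin/ping -c 1 192.0.2.1']}
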
diff --git a/pcs/resource.py b/pcs/resource.py
index 1a46ac7..082bd9d 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -188,6 +188,8 @@ def resource_cmd(argv):
                 )
         elif sub_cmd == "cleanup":
             resource_cleanup(argv_next)
+        elif sub_cmd == "refresh":
+            resource_refresh(argv_next)
         elif sub_cmd == "history":
             resource_history(argv_next)
         elif sub_cmd == "relocate":
@@ -2450,20 +2452,38 @@ def get_attrs(node, prepend_string = "", append_string = ""):
         return prepend_string + output.rstrip() + append_string
     return output.rstrip()
 
-def resource_cleanup(argv):
+def _parse_cleanup_refresh(argv):
     resource = None
     node = None
-
     if len(argv) > 1:
         raise CmdLineInputError()
     if argv:
         resource = argv[0]
     if "--node" in utils.pcs_options:
         node = utils.pcs_options["--node"]
-    force = "--force" in utils.pcs_options
+    return {
+        "node": node,
+        "resource": resource,
+        "force": "--force" in utils.pcs_options,
+    }
 
+def resource_cleanup(argv):
+    # --force is a no-op now but we must support it for backwards compatibility
+    options = _parse_cleanup_refresh(argv)
     print(lib_pacemaker.resource_cleanup(
-        utils.cmd_runner(), resource, node, force
+        utils.cmd_runner(),
+        resource=options["resource"],
+        node=options["node"]
+    ))
+
+def resource_refresh(argv):
+    options = _parse_cleanup_refresh(argv)
+    print(lib_pacemaker.resource_refresh(
+        utils.cmd_runner(),
+        resource=options["resource"],
+        node=options["node"],
+        full="--full" in utils.pcs_options,
+        force=options["force"]
     ))
 
 def resource_history(args):
diff --git a/pcs/settings_default.py b/pcs/settings_default.py
index ac2f02d..d6e8f22 100644
--- a/pcs/settings_default.py
+++ b/pcs/settings_default.py
@@ -27,7 +27,7 @@ pengine_binary = "/usr/libexec/pacemaker/pengine"
 crmd_binary = "/usr/libexec/pacemaker/crmd"
 cib_binary = "/usr/libexec/pacemaker/cib"
 stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.161"
+pcs_version = "0.9.162"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
 crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
@@ -53,3 +53,5 @@ pacemaker_wait_timeout_status = 62
 booth_config_dir = "/etc/booth"
 booth_binary = "/usr/sbin/booth"
 default_request_timeout = 60
+pcs_bundled_dir = "/usr/lib/pcs/bundled/"
+pcs_bundled_pacakges_dir = os.path.join(pcs_bundled_dir, "packages")
diff --git a/pcs/snmp/Makefile b/pcs/snmp/Makefile
new file mode 100644
index 0000000..ed78b2c
--- /dev/null
+++ b/pcs/snmp/Makefile
@@ -0,0 +1,46 @@
+PYAGENTX_VERSION="0.4.pcs.1"
+PYAGENTX_URI="https://github.com/ondrejmular/pyagentx/archive/v${PYAGENTX_VERSION}.tar.gz"
+
+ifndef PYTHON
+	PYTHON := $(shell which python3 || which python2 || which python)
+endif
+
+ifndef PREFIX
+  PREFIX=$(shell prefix=`$(PYTHON) -c "import sys; print(sys.prefix)"` || prefix="/usr"; echo $$prefix)
+endif
+
+ifndef PCS_PARENT_DIR
+  PCS_PARENT_DIR=${DESTDIR}/${PREFIX}/lib/pcs
+endif
+
+ifndef BUNDLED_LIB_DIR
+  BUNDLED_LIB_DIR=../bundled/
+endif
+ifndef BUNDLED_LIB_DIR_ABS
+  BUNDLED_LIB_DIR_ABS=$(shell readlink -f ${BUNDLED_LIB_DIR})
+endif
+BUNDLES_TMP_DIR=${BUNDLED_LIB_DIR_ABS}/tmp
+
+PYAGENTX_PROVIDED=true
+ifndef PYAGENTX_DIR
+  PYAGENTX_PROVIDED=false
+  PYAGENTX_DIR=${BUNDLES_TMP_DIR}/pyagentx-${PYAGENTX_VERSION}
+endif
+
+get_pyagentx:
+ifeq (${PYAGENTX_PROVIDED},false)
+	rm -rf ${PYAGENTX_DIR}
+	mkdir -p ${BUNDLES_TMP_DIR}
+	wget -qO- ${PYAGENTX_URI} | tar xvz -C ${BUNDLES_TMP_DIR}
+endif
+
+build_pyagentx: get_pyagentx
+ifdef PYAGENTX_DIR
+	cd ${PYAGENTX_DIR} && PYTHONPATH=${BUNDLED_LIB_DIR_ABS}/packages/ $(PYTHON) setup.py install --install-purelib /packages/ --root ${BUNDLED_LIB_DIR_ABS}
+endif
+
+build_bundled_libs: build_pyagentx
+
+clean:
+	rm -rf ${BUNDLES_TMP_DIR}/pyagentx*
+	rm -rf ${BUNDLED_LIB_DIR_ABS}/packages/pyagentx*
diff --git a/pcs/snmp/__init__.py b/pcs/snmp/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/snmp/agentx/__init__.py b/pcs/snmp/agentx/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/snmp/agentx/pcs_pyagentx.py b/pcs/snmp/agentx/pcs_pyagentx.py
new file mode 100644
index 0000000..a5a9bf1
--- /dev/null
+++ b/pcs/snmp/agentx/pcs_pyagentx.py
@@ -0,0 +1,15 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+)
+
+import sys
+
+from pcs import settings
+
+# add bundled lib path to python path
+if settings.pcs_bundled_pacakges_dir not in sys.path:
+    sys.path.insert(0, settings.pcs_bundled_pacakges_dir)
+
+from pyagentx import *
diff --git a/pcs/snmp/agentx/types.py b/pcs/snmp/agentx/types.py
new file mode 100644
index 0000000..b0f487b
--- /dev/null
+++ b/pcs/snmp/agentx/types.py
@@ -0,0 +1,48 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+)
+
+from collections import namedtuple
+
+import pcs.snmp.agentx.pcs_pyagentx as pyagentx
+
+
+BaseType = namedtuple("BaseType", ["data_type", "value"])
+
+
+class IntegerType(BaseType):
+    def __new__(cls, value):
+        return super(IntegerType, cls).__new__(
+            cls, data_type=pyagentx.TYPE_INTEGER, value=value
+        )
+
+
+class StringType(BaseType):
+    def __new__(cls, value):
+        return super(StringType, cls).__new__(
+            cls, data_type=pyagentx.TYPE_OCTETSTRING, value=value
+        )
+
+
+class Oid(
+    namedtuple("OidBase", ["oid", "str_oid", "data_type", "member_list"])
+):
+    """
+    This class represents one entity in the OID tree. It makes it possible to
+    define a MIB tree model for translating a string (human friendly) oid into
+    the numbered oid used in SNMP.
+
+    oid int -- unique oid identifier on a given layer
+    str_oid string -- string oid identifier in a given layer
+    data_type BaseType -- class inherited from BaseType, data type of the
+      entity this class represents
+    member_list list of Oid -- list of members/descendants of this entity.
+      If set, this entity is treated as an object identifier in the MIB and
+      data_type is ignored.
+    """
+    def __new__(cls, oid, str_oid, data_type=None, member_list=None):
+        return super(Oid, cls).__new__(
+            cls, oid, str_oid, data_type, member_list
+        )
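
Editorial sketch (not part of the patch) showing how the classes above fit together; the names in the tree are made up for illustration:

    # A two-level tree: the root is an object identifier (member_list set,
    # data_type ignored), the leaves carry a data type class.
    example_tree = Oid(
        1, "exampleCluster", member_list=[
            Oid(1, "exampleClusterName", StringType),
            Oid(2, "exampleClusterQuorate", IntegerType),
        ]
    )
    # Wrapping a value with a leaf's data type yields a (data_type, value)
    # named tuple ready for the agentx layer:
    name_value = StringType("test99")   # data_type == pyagentx.TYPE_OCTETSTRING
    quorate_value = IntegerType(1)      # data_type == pyagentx.TYPE_INTEGER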
diff --git a/pcs/snmp/agentx/updater.py b/pcs/snmp/agentx/updater.py
new file mode 100644
index 0000000..7ac20ae
--- /dev/null
+++ b/pcs/snmp/agentx/updater.py
@@ -0,0 +1,98 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+)
+
+from pcs.snmp.agentx.pcs_pyagentx import Updater
+
+
+class AgentxUpdaterBase(Updater):
+    """
+    Base class for SNMP agent updaters. It provides methods for conveniently
+    setting the values provided by the agent.
+    """
+
+    # this has to be set by the descendants
+    _oid_tree = None
+
+    @property
+    def oid_tree(self):
+        return self._oid_tree
+
+    def _set_val(self, data_type, oid, value):
+        self._data[oid] = {'name': oid, 'type': data_type, 'value': value}
+
+    def _set_value_list(self, data_type, oid, value):
+        if not isinstance(value, list):
+            value = [value]
+        for index, val in enumerate(value):
+            self._set_val(
+                data_type, "{oid}.{index}".format(oid=oid, index=index), val
+            )
+
+    def set_typed_value(self, oid, value):
+        """
+        oid string -- oid in the number form
+        value BaseType -- BaseType object filled with value to set. Value can be
+          either primitive value or list of primitive values.
+        """
+        self._set_value_list(value.data_type, oid, value.value)
+
+    def set_value(self, str_oid, value):
+        """
+        str_oid string -- string form of oid. The raw (number form) oid is
+          derived from the oid_tree.
+        value primitive value or list of primitive values -- value to be set
+          on the specified str_oid
+        """
+        oid, oid_cls = _str_oid_to_oid(self.oid_tree, str_oid)
+        self.set_typed_value(oid, oid_cls.data_type(value))
+
+    def set_table(self, oid, table):
+        """
+        oid string -- number form of oid
+        table list of list of BaseType -- members of the outer list represent
+          rows of the table and members of the inner list are columns.
+        """
+        for row in table:
+            if not row:
+                continue
+            row_id = _str_to_oid(str(row[0].value))
+            for index, col in enumerate(row[1:], start=2):
+                value_oid = "{base_oid}.{index}.{row_id}".format(
+                    base_oid=oid, index=index, row_id=row_id
+                )
+                self._set_val(col.data_type, value_oid, col.value)
+
+
+def _find_oid_in_sub_tree(sub_tree, section_name):
+    if sub_tree.member_list is None:
+        return None
+    for oid in sub_tree.member_list:
+        if oid.str_oid == section_name:
+            return oid
+    return None
+
+
+def _str_oid_to_oid(sub_tree, str_oid):
+    sections = str_oid.split(".")
+    oid_list = []
+    for section in sections:
+        parent_tree = sub_tree
+        sub_tree = _find_oid_in_sub_tree(sub_tree, section)
+        if sub_tree is None:
+            # sub_tree is None at this point, so report the parent searched in
+            raise AssertionError(
+                "oid section {0} ({1}) not found in {2}".format(
+                    section, str_oid, parent_tree.str_oid
+                )
+            )
+        oid_list.append(str(sub_tree.oid))
+        if sub_tree.data_type:
+            oid = ".".join(oid_list)
+            return (oid, sub_tree)
+
+
+def _str_to_oid(data):
+    length = len(data)
+    oid_int = [str(ord(i)) for i in data]
+    return str(length) + '.' + '.'.join(oid_int)
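
Editorial sketch (not part of the patch) of the two helpers above, assuming the module is importable as pcs.snmp.agentx.updater and the bundled pyagentx library is available:

    from pcs.snmp.agentx.updater import _str_to_oid

    # Table row ids encode a string as "<length>.<ord of each character>":
    assert _str_to_oid("rh7-1") == "5.114.104.55.45.49"

    # _str_oid_to_oid() walks an Oid tree; with the v1 tree defined in
    # pcs/snmp/updaters/v1.py, "pcmkPcsV1Cluster.pcmkPcsV1ClusterName"
    # resolves to the numbered oid "1.1" relative to the updater's
    # registration point.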
diff --git a/pcs/snmp/mibs/PCMK-PCS-MIB.txt b/pcs/snmp/mibs/PCMK-PCS-MIB.txt
new file mode 100644
index 0000000..360d91f
--- /dev/null
+++ b/pcs/snmp/mibs/PCMK-PCS-MIB.txt
@@ -0,0 +1,16 @@
+PACEMAKER-PCS-MIB DEFINITIONS ::= BEGIN
+
+IMPORTS
+    pacemaker FROM PACEMAKER-MIB
+    MODULE-IDENTITY FROM SNMPv2-SMI;
+
+pcmkPcs MODULE-IDENTITY
+    LAST-UPDATED "201709260000Z"
+    ORGANIZATION "www.clusterlabs.org"
+    CONTACT-INFO "email: users at clusterlabs.org"
+    DESCRIPTION  "Pacemaker/corosync cluster MIB"
+    REVISION     "201709260000Z"
+    DESCRIPTION  "initial MIB version"
+    ::= { pacemaker 100 }
+END
+
diff --git a/pcs/snmp/mibs/PCMK-PCS-V1-MIB.txt b/pcs/snmp/mibs/PCMK-PCS-V1-MIB.txt
new file mode 100644
index 0000000..9913ffd
--- /dev/null
+++ b/pcs/snmp/mibs/PCMK-PCS-V1-MIB.txt
@@ -0,0 +1,218 @@
+PACEMAKER-PCS-V1-MIB DEFINITIONS ::= BEGIN
+
+IMPORTS
+    pcmkPcs FROM PACEMAKER-PCS-MIB
+    MODULE-IDENTITY, OBJECT-TYPE, Integer32 FROM SNMPv2-SMI
+    MODULE-COMPLIANCE, OBJECT-GROUP FROM SNMPv2-CONF;
+
+pcmkPcsV1 MODULE-IDENTITY
+    LAST-UPDATED "201709260000Z"
+    ORGANIZATION "www.clusterlabs.org"
+    CONTACT-INFO "email: users at clusterlabs.org"
+    DESCRIPTION  "Pacemaker/corosync cluster MIB, data version 1"
+    REVISION     "201709260000Z"
+    DESCRIPTION  "initial version"
+    ::= { pcmkPcs 1 }
+
+pcmkPcsV1Cluster OBJECT IDENTIFIER ::= { pcmkPcsV1 1 }
+pcmkPcsV1Conformance OBJECT IDENTIFIER ::= { pcmkPcsV1 2 }
+
+--  #####  Cluster  #####  --
+
+pcmkPcsV1ClusterName OBJECT-TYPE
+    SYNTAX      OCTET STRING
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION "Cluster name"
+    ::= { pcmkPcsV1Cluster 1 }
+
+pcmkPcsV1ClusterQuorate OBJECT-TYPE
+    SYNTAX      Integer32
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION "1 if cluster is quorate, 0 otherwise"
+    ::= { pcmkPcsV1Cluster 2 }
+
+pcmkPcsV1ClusterNodesNum OBJECT-TYPE
+    SYNTAX      Integer32
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION "Number of nodes configured to participate in cluster"
+    ::= { pcmkPcsV1Cluster 3 }
+
+pcmkPcsV1ClusterNodesNames OBJECT-TYPE
+    SYNTAX      OCTET STRING
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION "Names of nodes configured to participate in cluster"
+    ::= { pcmkPcsV1Cluster 4 }
+
+pcmkPcsV1ClusterCorosyncNodesOnlineNum OBJECT-TYPE
+    SYNTAX      Integer32
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 5 }
+
+pcmkPcsV1ClusterCorosyncNodesOnlineNames OBJECT-TYPE
+    SYNTAX      OCTET STRING
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 6 }
+
+pcmkPcsV1ClusterCorosyncNodesOfflineNum OBJECT-TYPE
+    SYNTAX      Integer32
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 7 }
+
+pcmkPcsV1ClusterCorosyncNodesOfflineNames OBJECT-TYPE
+    SYNTAX      OCTET STRING
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 8 }
+
+pcmkPcsV1ClusterPcmkNodesOnlineNum OBJECT-TYPE
+    SYNTAX      Integer32
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 9 }
+
+pcmkPcsV1ClusterPcmkNodesOnlineNames OBJECT-TYPE
+    SYNTAX      OCTET STRING
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 10 }
+
+pcmkPcsV1ClusterPcmkNodesStandbyNum OBJECT-TYPE
+    SYNTAX      Integer32
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 11 }
+
+pcmkPcsV1ClusterPcmkNodesStandbyNames OBJECT-TYPE
+    SYNTAX      OCTET STRING
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 12 }
+
+pcmkPcsV1ClusterPcmkNodesOfflineNum OBJECT-TYPE
+    SYNTAX      Integer32
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 13 }
+
+pcmkPcsV1ClusterPcmkNodesOfflineNames OBJECT-TYPE
+    SYNTAX      OCTET STRING
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 14 }
+
+pcmkPcsV1ClusterAllResourcesNum OBJECT-TYPE
+    SYNTAX      Integer32
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 15 }
+
+pcmkPcsV1ClusterAllResourcesIds OBJECT-TYPE
+    SYNTAX      OCTET STRING
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 16 }
+
+pcmkPcsV1ClusterRunningResourcesNum OBJECT-TYPE
+    SYNTAX      Integer32
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 17 }
+
+pcmkPcsV1ClusterRunningResourcesIds OBJECT-TYPE
+    SYNTAX      OCTET STRING
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 18 }
+
+pcmkPcsV1ClusterStoppedResroucesNum OBJECT-TYPE
+    SYNTAX      Integer32
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 19 }
+
+pcmkPcsV1ClusterStoppedResroucesIds OBJECT-TYPE
+    SYNTAX      OCTET STRING
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 20 }
+
+pcmkPcsV1ClusterFailedResourcesNum OBJECT-TYPE
+    SYNTAX      Integer32
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 21 }
+
+pcmkPcsV1ClusterFailedResourcesIds OBJECT-TYPE
+    SYNTAX      OCTET STRING
+    MAX-ACCESS  read-only
+    STATUS      current
+    DESCRIPTION ""
+    ::= { pcmkPcsV1Cluster 22 }
+
+-- COMPLIANCE
+
+pcmkPcsV1ConformanceCompliances OBJECT IDENTIFIER ::= { pcmkPcsV1Conformance 1 }
+
+pcmkPcsV1ConformanceGroups OBJECT IDENTIFIER ::= { pcmkPcsV1Conformance 2 }
+
+pcmkPcsV1ConformanceCompliance MODULE-COMPLIANCE
+    STATUS      current
+    DESCRIPTION "Clustering Compliance Information"
+    MODULE     -- this module
+    MANDATORY-GROUPS { pcmkPcsV1ConformanceObjectGroup }
+    ::= { pcmkPcsV1ConformanceCompliances 1 }
+
+pcmkPcsV1ConformanceObjectGroup OBJECT-GROUP
+    OBJECTS {
+        pcmkPcsV1ClusterName,
+        pcmkPcsV1ClusterQuorate,
+        pcmkPcsV1ClusterNodesNum,
+        pcmkPcsV1ClusterNodesNames,
+        pcmkPcsV1ClusterCorosyncNodesOnlineNum,
+        pcmkPcsV1ClusterCorosyncNodesOnlineNames,
+        pcmkPcsV1ClusterCorosyncNodesOfflineNum,
+        pcmkPcsV1ClusterCorosyncNodesOfflineNames,
+        pcmkPcsV1ClusterPcmkNodesOnlineNum,
+        pcmkPcsV1ClusterPcmkNodesOnlineNames,
+        pcmkPcsV1ClusterPcmkNodesStandbyNum,
+        pcmkPcsV1ClusterPcmkNodesStandbyNames,
+        pcmkPcsV1ClusterPcmkNodesOfflineNum,
+        pcmkPcsV1ClusterPcmkNodesOfflineNames,
+        pcmkPcsV1ClusterAllResourcesNum,
+        pcmkPcsV1ClusterAllResourcesIds,
+        pcmkPcsV1ClusterRunningResourcesNum,
+        pcmkPcsV1ClusterRunningResourcesIds,
+        pcmkPcsV1ClusterStoppedResroucesNum,
+        pcmkPcsV1ClusterStoppedResroucesIds,
+        pcmkPcsV1ClusterFailedResourcesNum,
+        pcmkPcsV1ClusterFailedResourcesIds
+    }
+    STATUS current
+    DESCRIPTION "Cluster objects"
+    ::= { pcmkPcsV1ConformanceGroups 1 }
+
+END
diff --git a/pcs/snmp/pcs_snmp_agent b/pcs/snmp/pcs_snmp_agent
new file mode 100755
index 0000000..ead3e23
--- /dev/null
+++ b/pcs/snmp/pcs_snmp_agent
@@ -0,0 +1,18 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+PACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__)
+)))
+sys.path.insert(0, PACKAGE_DIR)
+
+import pcs
+
+pcs.settings.pcs_bundled_pacakges_dir = os.path.join(
+    PACKAGE_DIR, "pcs/bundled/packages"
+)
+pcs.settings.pcsd_exec_location = os.path.join(PACKAGE_DIR, "pcsd")
+pcs.snmp.pcs_snmp_agent.main(sys.argv)
diff --git a/pcs/snmp/pcs_snmp_agent.8 b/pcs/snmp/pcs_snmp_agent.8
new file mode 100644
index 0000000..dd6e4cc
--- /dev/null
+++ b/pcs/snmp/pcs_snmp_agent.8
@@ -0,0 +1,29 @@
+.TH PCS_SNMP_AGENT "8" "November 2017" "pcs-snmp 0.9.162" "System Administration Utilities"
+.SH NAME
+pcs_snmp_agent \- an SNMP agent providing data about a corosync/pacemaker cluster
+
+.SH DESCRIPTION
+pcs_snmp_agent is an SNMP agent which connects to the master agent (snmpd) via the agentx protocol and provides information about a corosync/pacemaker cluster.
+
+.SH SETUP
+pcs_snmp_agent doesn't work as a standalone agent. It only provides data to a master agent. To configure the snmpd daemon (from the net\-snmp package) as the master agent, add the line 'master agentx' to the snmpd configuration file (see snmp.conf(5)). The master agent then has to be configured to accept requests for the pcs defined MIBs. This can be done by adding the line 'view systemview included .1.3.6.1.4.1.32723.100' to the snmpd config file.
+
+.SH MIB DATA VERSIONS
+.TP
+.B V1 \- PACEMAKER\-PCS\-V1\-MIB
+Provides basic information about the cluster, such as the cluster name, the list of cluster nodes and the list of primitive resources.
+
+.SH ENVIRONMENT
+.TP
+.B PCS_SNMP_AGENT_DEBUG=<boolean>
+Set to \fBtrue\fR for advanced debugging information.
+.TP
+.B PCS_SNMP_AGENT_UPDATE_INTERVAL=<integer>
+Time interval in seconds after which the agent updates the provided data.
+
+.SH SEE ALSO
+.BR pcs (8)
+.BR pcsd (8)
+
+.BR snmpd (8)
+.BR snmp.conf (5)
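
Editorial note (not part of the patch): put together, the two configuration lines quoted in the SETUP section above would typically be added to the snmpd configuration file, commonly /etc/snmp/snmpd.conf (the location may vary by distribution):

    master agentx
    view systemview included .1.3.6.1.4.1.32723.100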
diff --git a/pcs/snmp/pcs_snmp_agent.conf b/pcs/snmp/pcs_snmp_agent.conf
new file mode 100644
index 0000000..0fc0c20
--- /dev/null
+++ b/pcs/snmp/pcs_snmp_agent.conf
@@ -0,0 +1,7 @@
+# pcs snmp agent configuration file
+
+# Enable logging of debugging messages
+#PCS_SNMP_AGENT_DEBUG=false
+
+# Data update interval in seconds
+#PCS_SNMP_AGENT_UPDATE_INTERVAL=30
diff --git a/pcs/snmp/pcs_snmp_agent.logrotate b/pcs/snmp/pcs_snmp_agent.logrotate
new file mode 100644
index 0000000..a53c21f
--- /dev/null
+++ b/pcs/snmp/pcs_snmp_agent.logrotate
@@ -0,0 +1,10 @@
+/var/log/pcs/snmp/pcs_snmp_agent.log {
+    rotate 5
+    weekly
+    missingok
+    notifempty
+    compress
+    delaycompress
+    copytruncate
+    create 0600 root root
+}
diff --git a/pcs/snmp/pcs_snmp_agent.py b/pcs/snmp/pcs_snmp_agent.py
new file mode 100644
index 0000000..74171d2
--- /dev/null
+++ b/pcs/snmp/pcs_snmp_agent.py
@@ -0,0 +1,92 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+)
+
+import os
+import sys
+import logging
+import logging.handlers
+
+import pcs.utils
+from pcs.snmp import settings
+from pcs.snmp.agentx import pcs_pyagentx as pyagentx
+from pcs.snmp.updaters.v1 import ClusterPcsV1Updater
+
+
+logger = logging.getLogger("pcs.snmp")
+logger.addHandler(logging.NullHandler())
+
+
+def is_debug():
+    debug = os.environ.get("PCS_SNMP_AGENT_DEBUG", "")
+    return debug.lower() in ["true", "on", "1"]
+
+
+def get_update_interval():
+    interval = os.environ.get("PCS_SNMP_AGENT_UPDATE_INTERVAL")
+    if not interval:
+        return settings.DEFAULT_UPDATE_INTERVAL
+
+    def _log_invalid_value(_value):
+        logger.warning(
+            "Invalid update interval value: '%s' is not >= 1.0", str(_value)
+        )
+        logger.debug(
+            "Using default update interval: %s",
+            str(settings.DEFAULT_UPDATE_INTERVAL),
+        )
+
+    try:
+        interval = float(interval)
+    except ValueError:
+        _log_invalid_value(interval)
+        return settings.DEFAULT_UPDATE_INTERVAL
+    if interval < 1.0:
+        _log_invalid_value(interval)
+        return settings.DEFAULT_UPDATE_INTERVAL
+    return interval
+
+
+def setup_logging(debug=False):
+    level = logging.INFO
+    if debug:
+        level = logging.DEBUG
+        # this is required to enable debugging also in the ruby code:
+        # the '--debug' key has to be added to pcs_options
+        pcs.utils.pcs_options["--debug"] = debug
+    formatter = logging.Formatter(
+        "%(asctime)s - %(name)s - %(levelname)s - %(message)s"
+    )
+    handler = logging.handlers.WatchedFileHandler(
+        settings.LOG_FILE, encoding="utf8"
+    )
+    handler.setLevel(level)
+    handler.setFormatter(formatter)
+    for logger_name in ["pyagentx", "pcs"]:
+        logger_instance = logging.getLogger(logger_name)
+        logger_instance.setLevel(level)
+        logger_instance.addHandler(handler)
+
+
+class PcsAgent(pyagentx.Agent):
+    def setup(self):
+        update_interval = get_update_interval()
+        logger.info("Update interval set to: %s", str(update_interval))
+        self.register(
+            settings.PCS_OID + ".1", ClusterPcsV1Updater, freq=update_interval,
+        )
+
+
+def main():
+    setup_logging(is_debug())
+    try:
+        agent = PcsAgent()
+        agent.start()
+    except Exception as e:
+        print("Unhandled exception: {0}".format(str(e)))
+        agent.stop()
+        sys.exit(1)
+    except KeyboardInterrupt:
+        agent.stop()
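
Editorial sketch (not part of the patch) of the environment handling above, assuming the module is importable as pcs.snmp.pcs_snmp_agent and the bundled pyagentx library is available:

    import os
    from pcs.snmp import pcs_snmp_agent

    os.environ["PCS_SNMP_AGENT_DEBUG"] = "on"            # "true", "on", "1" enable debug
    os.environ["PCS_SNMP_AGENT_UPDATE_INTERVAL"] = "abc" # not a number

    print(pcs_snmp_agent.is_debug())             # True
    print(pcs_snmp_agent.get_update_interval())  # falls back to 30 and logs a warning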
diff --git a/pcs/snmp/pcs_snmp_agent.service b/pcs/snmp/pcs_snmp_agent.service
new file mode 100644
index 0000000..112da73
--- /dev/null
+++ b/pcs/snmp/pcs_snmp_agent.service
@@ -0,0 +1,12 @@
+[Unit]
+Description=SNMP agent for pacemaker cluster
+Requires=snmpd.service
+
+[Service]
+EnvironmentFile=/etc/sysconfig/pcs_snmp_agent
+ExecStart=/usr/lib/pcs/pcs_snmp_agent > /dev/null
+Type=simple
+TimeoutSec=500
+
+[Install]
+WantedBy=multi-user.target
diff --git a/pcs/snmp/settings.py b/pcs/snmp/settings.py
new file mode 100644
index 0000000..0559446
--- /dev/null
+++ b/pcs/snmp/settings.py
@@ -0,0 +1,11 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+)
+
+LOG_FILE = "/var/log/pcs/pcs_snmp_agent.log"
+ENTERPRISES_OID = "1.3.6.1.4.1"
+PACEMAKER_OID = ENTERPRISES_OID + ".32723"
+PCS_OID = PACEMAKER_OID + ".100"
+DEFAULT_UPDATE_INTERVAL = 30
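
Editorial note (not part of the patch): the constants above compose into the subtree the man page tells the master agent to expose, and the agent registers the V1 updater one level below it:

    ENTERPRISES_OID = "1.3.6.1.4.1"
    PACEMAKER_OID = ENTERPRISES_OID + ".32723"
    PCS_OID = PACEMAKER_OID + ".100"
    assert PCS_OID == "1.3.6.1.4.1.32723.100"           # OID used in the snmpd view
    assert PCS_OID + ".1" == "1.3.6.1.4.1.32723.100.1"  # registered for ClusterPcsV1Updater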
diff --git a/pcs/snmp/updaters/__init__.py b/pcs/snmp/updaters/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/snmp/updaters/v1.py b/pcs/snmp/updaters/v1.py
new file mode 100644
index 0000000..33e171a
--- /dev/null
+++ b/pcs/snmp/updaters/v1.py
@@ -0,0 +1,209 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+)
+
+import logging
+
+from pcs.utils import run_pcsdcli
+from pcs.snmp.agentx.updater import AgentxUpdaterBase
+from pcs.snmp.agentx.types import (
+    IntegerType,
+    StringType,
+    Oid,
+)
+
+logger = logging.getLogger("pcs.snmp.updaters.v1")
+logger.addHandler(logging.NullHandler())
+
+_cluster_v1_oid_tree = Oid(
+    1, "pcmkPcsV1Cluster", member_list=[
+        Oid(1, "pcmkPcsV1ClusterName", StringType),
+        Oid(2, "pcmkPcsV1ClusterQuorate", IntegerType),
+        Oid(3, "pcmkPcsV1ClusterNodesNum", IntegerType),
+        Oid(4, "pcmkPcsV1ClusterNodesNames", StringType),
+        Oid(5, "pcmkPcsV1ClusterCorosyncNodesOnlineNum", IntegerType),
+        Oid(6, "pcmkPcsV1ClusterCorosyncNodesOnlineNames", StringType),
+        Oid(7, "pcmkPcsV1ClusterCorosyncNodesOfflineNum", IntegerType),
+        Oid(8, "pcmkPcsV1ClusterCorosyncNodesOfflineNames", StringType),
+        Oid(9, "pcmkPcsV1ClusterPcmkNodesOnlineNum", IntegerType),
+        Oid(10, "pcmkPcsV1ClusterPcmkNodesOnlineNames", StringType),
+        Oid(11, "pcmkPcsV1ClusterPcmkNodesStandbyNum", IntegerType),
+        Oid(12, "pcmkPcsV1ClusterPcmkNodesStandbyNames", StringType),
+        Oid(13, "pcmkPcsV1ClusterPcmkNodesOfflineNum", IntegerType),
+        Oid(14, "pcmkPcsV1ClusterPcmkNodesOfflineNames", StringType),
+        Oid(15, "pcmkPcsV1ClusterAllResourcesNum", IntegerType),
+        Oid(16, "pcmkPcsV1ClusterAllResourcesIds", StringType),
+        Oid(17, "pcmkPcsV1ClusterRunningResourcesNum", IntegerType),
+        Oid(18, "pcmkPcsV1ClusterRunningResourcesIds", StringType),
+        Oid(19, "pcmkPcsV1ClusterStoppedResourcesNum", IntegerType),
+        Oid(20, "pcmkPcsV1ClusterStoppedResourcesIds", StringType),
+        Oid(21, "pcmkPcsV1ClusterFailedResourcesNum", IntegerType),
+        Oid(22, "pcmkPcsV1ClusterFailedResourcesIds", StringType),
+    ]
+)
+
+
+class ClusterPcsV1Updater(AgentxUpdaterBase):
+    _oid_tree = Oid(0, "pcs_v1", member_list=[_cluster_v1_oid_tree])
+    def update(self):
+        output, ret_val = run_pcsdcli("node_status")
+        if ret_val != 0 or output["status"] != "ok":
+            logger.error(
+                "Unable to obtain cluster status.\nPCSD return code: %s\n"
+                "PCSD output: %s\n",
+                ret_val,
+                output
+            )
+            return
+        data = output["data"]
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterName",
+            data.get("cluster_name", "")
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterQuorate",
+            _bool_to_int(data.get("node", {}).get("quorum"))
+        )
+
+        # nodes
+        known_nodes = data.get("known_nodes", [])
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterNodesNum",
+            len(known_nodes)
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterNodesNames",
+            known_nodes
+        )
+
+        corosync_nodes_online = data.get("corosync_online")
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterCorosyncNodesOnlineNum",
+            len(corosync_nodes_online)
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterCorosyncNodesOnlineNames",
+            corosync_nodes_online
+        )
+
+        corosync_nodes_offline = data.get("corosync_offline")
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterCorosyncNodesOfflineNum",
+            len(corosync_nodes_offline)
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterCorosyncNodesOfflineNames",
+            corosync_nodes_offline
+        )
+
+        pcmk_nodes_online = data.get("pacemaker_online")
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterPcmkNodesOnlineNum",
+            len(pcmk_nodes_online)
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterPcmkNodesOnlineNames",
+            pcmk_nodes_online
+        )
+
+        pcmk_nodes_standby = data.get("pacemaker_standby")
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterPcmkNodesStandbyNum",
+            len(pcmk_nodes_standby)
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterPcmkNodesStandbyNames",
+            pcmk_nodes_standby
+        )
+
+        pcmk_nodes_offline = data.get("pacemaker_offline")
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterPcmkNodesOfflineNum",
+            len(pcmk_nodes_offline)
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterPcmkNodesOfflineNames",
+            pcmk_nodes_offline
+        )
+
+        # resources
+        primitive_list = []
+        for resource in data.get("resource_list", []):
+            primitive_list.extend(_get_primitives(resource))
+
+        primitive_id_list = _get_resource_id_list(primitive_list)
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterAllResourcesNum",
+            len(primitive_id_list)
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterAllResourcesIds",
+            primitive_id_list
+        )
+        running_primitive_id_list = _get_resource_id_list(
+            primitive_list, _res_in_status(["running"])
+        )
+
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterRunningResourcesNum",
+            len(running_primitive_id_list)
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterRunningResourcesIds",
+            running_primitive_id_list
+        )
+
+        disabled_primitive_id_list = _get_resource_id_list(
+            primitive_list, _res_in_status(["disabled"])
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterStoppedResourcesNum",
+            len(disabled_primitive_id_list)
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterStoppedResourcesIds",
+            disabled_primitive_id_list
+        )
+
+        failed_primitive_id_list = _get_resource_id_list(
+            primitive_list,
+            lambda res: not _res_in_status(["running", "disabled"])(res)
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterFailedResourcesNum",
+            len(failed_primitive_id_list)
+        )
+        self.set_value(
+            "pcmkPcsV1Cluster.pcmkPcsV1ClusterFailedResourcesIds",
+            failed_primitive_id_list
+        )
+
+
+def _bool_to_int(value):
+    return 1 if value else 0
+
+
+def _get_primitives(resource):
+    res_type = resource["class_type"]
+    if res_type == "primitive":
+        return [resource]
+    if res_type == "group":
+        primitive_list = []
+        for primitive in resource["members"]:
+            primitive_list.extend(_get_primitives(primitive))
+        return primitive_list
+    # check master-slave type
+    if res_type in ["clone", "master"]:
+        return _get_primitives(resource["member"])
+
+
+def _get_resource_id_list(resource_list, predicate=None):
+    if predicate is None:
+        predicate = lambda _: True
+    return [resource["id"] for resource in resource_list if predicate(resource)]
+
+
+def _res_in_status(status_list):
+    return lambda res: res["status"] in status_list
diff --git a/pcs/status.py b/pcs/status.py
index b2e65ec..ec10d61 100644
--- a/pcs/status.py
+++ b/pcs/status.py
@@ -14,6 +14,7 @@ from pcs import (
 )
 from pcs.qdevice import qdevice_status_cmd
 from pcs.quorum import quorum_status_cmd
+from pcs.cli.common.console_report import indent
 from pcs.cli.common.errors import CmdLineInputError
 from pcs.lib.errors import LibraryError
 from pcs.lib.pacemaker.state import ClusterState
@@ -103,6 +104,15 @@ def full_status():
 
     print(output)
 
+    if "--full" in utils.pcs_options:
+        tickets, retval = utils.run(["crm_ticket", "-L"])
+        if retval != 0:
+            print("WARNING: Unable to get information about tickets")
+            print()
+        elif tickets:
+            print("Tickets:")
+            print("\n".join(indent(tickets.split("\n"))))
+
     if not utils.usefile:
         if  "--full" in utils.pcs_options and utils.hasCorosyncConf():
             print_pcsd_daemon_status()
diff --git a/pcs/stonith.py b/pcs/stonith.py
index 6408285..7e4327e 100644
--- a/pcs/stonith.py
+++ b/pcs/stonith.py
@@ -69,6 +69,8 @@ def stonith_cmd(argv):
             stonith_fence(argv_next)
         elif sub_cmd == "cleanup":
             resource.resource_cleanup(argv_next)
+        elif sub_cmd == "refresh":
+            resource.resource_refresh(argv_next)
         elif sub_cmd == "confirm":
             stonith_confirm(argv_next)
         elif sub_cmd == "get_fence_agent_info":
diff --git a/pcs/test/curl_test.py b/pcs/test/curl_test.py
index 094cdea..a12bcb2 100644
--- a/pcs/test/curl_test.py
+++ b/pcs/test/curl_test.py
@@ -20,7 +20,7 @@ from pcs import utils
 
 logger_handler = logging.StreamHandler()
 logger_handler.setFormatter(logging.Formatter('%(levelname)s: %(message)s'))
-logger = logging.getLogger("old_cli")
+logger = logging.getLogger("pcs")
 logger.setLevel(logging.DEBUG)
 logger.addHandler(logger_handler)
 
diff --git a/pcs/test/resources/corosync-3nodes-qdevice-heuristics.conf b/pcs/test/resources/corosync-3nodes-qdevice-heuristics.conf
new file mode 100644
index 0000000..66d794f
--- /dev/null
+++ b/pcs/test/resources/corosync-3nodes-qdevice-heuristics.conf
@@ -0,0 +1,44 @@
+totem {
+    version: 2
+    secauth: off
+    cluster_name: test99
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+
+    node {
+        ring0_addr: rh7-3
+        nodeid: 3
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+
+    device {
+        model: net
+
+        net {
+            host: 127.0.0.1
+        }
+
+        heuristics {
+          mode: on
+          exec_ls: /usr/bin/test -f /tmp/test
+        }
+    }
+}
+
+logging {
+    to_syslog: yes
+}
diff --git a/pcs/test/resources/qdevice-certs/final-certificate.pk12 b/pcs/test/resources/qdevice-certs/final-certificate.pk12
new file mode 100644
index 0000000..5a581a2
--- /dev/null
+++ b/pcs/test/resources/qdevice-certs/final-certificate.pk12
@@ -0,0 +1 @@
+final qdevice certificate to be imported on all nodes
diff --git a/pcs/test/resources/qdevice-certs/qdevice-cert-request.crq b/pcs/test/resources/qdevice-certs/qdevice-cert-request.crq
new file mode 100644
index 0000000..ae6f144
--- /dev/null
+++ b/pcs/test/resources/qdevice-certs/qdevice-cert-request.crq
@@ -0,0 +1 @@
+qdevice certificate request
diff --git a/pcs/test/resources/qdevice-certs/qnetd-cacert.crt b/pcs/test/resources/qdevice-certs/qnetd-cacert.crt
index 34dcab0..f6badf1 100644
--- a/pcs/test/resources/qdevice-certs/qnetd-cacert.crt
+++ b/pcs/test/resources/qdevice-certs/qnetd-cacert.crt
@@ -1 +1 @@
-certificate data
\ No newline at end of file
+qnetd CA certificate
\ No newline at end of file
diff --git a/pcs/test/resources/qdevice-certs/signed-certificate.crt b/pcs/test/resources/qdevice-certs/signed-certificate.crt
new file mode 100644
index 0000000..f4a034e
--- /dev/null
+++ b/pcs/test/resources/qdevice-certs/signed-certificate.crt
@@ -0,0 +1 @@
+qdevice certificate request signed by qnetd
diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
index 5f77f38..807e528 100644
--- a/pcs/test/test_lib_commands_quorum.py
+++ b/pcs/test/test_lib_commands_quorum.py
@@ -4,24 +4,26 @@ from __future__ import (
     print_function,
 )
 
+import base64
 import logging
-from pcs.test.tools.pcs_unittest import TestCase, skip
 
+from pcs.test.tools import fixture
 from pcs.test.tools.assertions import (
     ac,
     assert_raise_library_error,
     assert_report_item_list_equal,
 )
+from pcs.test.tools.command_env import get_env_tools
 from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.misc import get_test_resource as rc
-from pcs.test.tools.pcs_unittest import mock
+from pcs.test.tools.misc import (
+    get_test_resource as rc,
+    outdent,
+)
+from pcs.test.tools.pcs_unittest import mock, skip, TestCase
 
 from pcs.common import report_codes
 from pcs.lib.env import LibraryEnvironment
-from pcs.lib.errors import (
-    LibraryError,
-    ReportItemSeverity as severity,
-)
+from pcs.lib.errors import ReportItemSeverity as severity
 from pcs.lib.corosync.config_facade import ConfigFacade
 from pcs.lib.external import NodeCommunicationException
 from pcs.lib.node import NodeAddresses, NodeAddressesList
@@ -137,6 +139,55 @@ class GetQuorumConfigTest(TestCase, CmanMixin):
                     "generic_options": {
                         "option": "value",
                     },
+                    "heuristics_options": {
+                    },
+                },
+            },
+            lib.get_config(lib_env)
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_device_with_heuristics(self, mock_get_corosync):
+        original_conf = """\
+            quorum {
+                provider: corosync_votequorum
+                wait_for_all: 1
+                device {
+                    option: value
+                    model: net
+                    net {
+                        host: 127.0.0.1
+                        port: 4433
+                    }
+                    heuristics {
+                        mode: on
+                        exec_ls: test -f /tmp/test
+                    }
+                }
+            }
+        """
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        self.assertEqual(
+            {
+                "options": {
+                    "wait_for_all": "1",
+                },
+                "device": {
+                    "model": "net",
+                    "model_options": {
+                        "host": "127.0.0.1",
+                        "port": "4433",
+                    },
+                    "generic_options": {
+                        "option": "value",
+                    },
+                    "heuristics_options": {
+                        "exec_ls": "test -f /tmp/test",
+                        "mode": "on",
+                    },
                 },
             },
             lib.get_config(lib_env)
@@ -411,6 +462,7 @@ class SetQuorumOptionsTest(TestCase, CmanMixin):
                         "last_man_standing_window",
                         "wait_for_all",
                     ],
+                    "allowed_patterns": [],
                 }
             )
         )
@@ -507,998 +559,1282 @@ class StatusDeviceTextTest(TestCase, CmanMixin):
         mock_status.assert_called_once_with("mock_runner", True)
 
 
- at skip("TODO: rewrite using new testing fremework")
- at mock.patch.object(LibraryEnvironment, "push_corosync_conf")
- at mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
- at mock.patch("pcs.lib.commands.quorum._add_device_model_net")
- at mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_enable")
- at mock.patch("pcs.lib.commands.quorum.qdevice_client.remote_client_start")
-class AddDeviceTest(TestCase, CmanMixin):
+class AddDeviceNetTest(TestCase):
     def setUp(self):
-        self.mock_logger = mock.MagicMock(logging.Logger)
-        self.mock_reporter = MockLibraryReportProcessor()
-
-    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
-    def test_disabled_on_cman(
-        self, mock_client_start, mock_client_enable, mock_add_net,
-        mock_get_corosync, mock_push_corosync
-    ):
-        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-        self.assert_disabled_on_cman(
-            lambda: lib.add_device(lib_env, "net", {"host": "127.0.0.1"}, {})
+        self.env_assist, self.config = get_env_tools(self)
+
+        self.qnetd_host = "qnetd-host"
+        self.corosync_conf_name = "corosync-3nodes.conf"
+        # the cluster name is defined in the corosync-3nodes.conf file
+        self.cluster_name = "test99"
+        # nodes are defined in the corosync-3nodes.conf file
+        self.cluster_nodes = ["rh7-1", "rh7-2", "rh7-3"]
+        self.certs = {
+            "cacert": {
+                "path": rc("qdevice-certs/qnetd-cacert.crt"),
+            },
+            "cert_request": {
+                "path": rc("qdevice-certs/qdevice-cert-request.crq"),
+            },
+            "signed_request": {
+                "path": rc("qdevice-certs/signed-certificate.crt"),
+            },
+            "final_cert": {
+                "path": rc("qdevice-certs/final-certificate.pk12"),
+            },
+        }
+        for cert_info in self.certs.values():
+            # b64encode accepts bytes in python3, so we must read the file as
+            # binary to get bytes instead of a string. In python2, it doesn't
+            # matter.
+            plain = open(cert_info["path"], "rb").read()
+            cert_info["data"] = plain
+            # Convert bytes to string in python3, because the communicator does
+            # it the same way - it accepts bytes, converts them to string and
+            # passes that to further processing.
+            cert_info["b64data"] = base64.b64encode(plain).decode("utf-8")
+
+    def fixture_config_http_get_ca_cert(self):
+        self.config.http.add_communication(
+            "http.get_ca_certificate",
+            [
+                {"label": self.qnetd_host, },
+            ],
+            action="remote/qdevice_net_get_ca_certificate",
+            response_code=200,
+            output=self.certs["cacert"]["b64data"]
+        )
+
+    def fixture_config_http_client_init(self):
+        self.config.http.add_communication(
+            "http.client_init",
+            [{"label": node} for node in self.cluster_nodes],
+            action="remote/qdevice_net_client_init_certificate_storage",
+            param_list=[
+                ("ca_certificate", self.certs["cacert"]["b64data"]),
+            ],
+            response_code=200,
+        )
+
+    def fixture_config_runner_get_cert_request(self):
+        self.config.runner.place(
+            "corosync-qdevice-net-certutil -r -n {cluster_name}".format(
+                cluster_name=self.cluster_name
+            ),
+            name="runner.corosync.qdevice.cert-request",
+            stdout="Certificate request stored in {path}".format(
+                path=self.certs["cert_request"]["path"]
+            )
         )
-        mock_get_corosync.assert_not_called()
-        mock_push_corosync.assert_not_called()
-        mock_add_net.assert_not_called()
-        mock_client_enable.assert_not_called()
-        mock_client_start.assert_not_called()
 
-    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
-    def test_enabled_on_cman_if_not_live(
-        self, mock_client_start, mock_client_enable, mock_add_net,
-        mock_get_corosync, mock_push_corosync
-    ):
-        original_conf = open(rc("corosync-3nodes.conf")).read()
-        mock_get_corosync.return_value = original_conf
-        lib_env = LibraryEnvironment(
-            self.mock_logger,
-            self.mock_reporter,
-            corosync_conf_data=original_conf
+    def fixture_config_http_sign_cert_request(self):
+        self.config.http.add_communication(
+            "http.sign_certificate_request",
+            [
+                {"label": self.qnetd_host, },
+            ],
+            action="remote/qdevice_net_sign_node_certificate",
+            param_list=[
+                (
+                    "certificate_request",
+                    self.certs["cert_request"]["b64data"]
+                ),
+                ("cluster_name", self.cluster_name),
+            ],
+            response_code=200,
+            output=self.certs["signed_request"]["b64data"]
         )
 
-        assert_raise_library_error(
-            lambda: lib.add_device(lib_env, "bad model", {}, {}),
-            (
-                severity.ERROR,
-                report_codes.INVALID_OPTION_VALUE,
-                {
-                    "option_name": "model",
-                    "option_value": "bad model",
-                    "allowed_values": ("net", ),
-                },
-                report_codes.FORCE_QDEVICE_MODEL
+    def fixture_config_runner_cert_to_pk12(self, cert_file_path):
+        self.config.runner.place(
+            "corosync-qdevice-net-certutil -M -c {file_path}".format(
+                file_path=cert_file_path
+            ),
+            name="runner.corosync.qdevice.cert-to-pk12",
+            stdout="Certificate request stored in {path}".format(
+                path=self.certs["final_cert"]["path"]
             )
         )
 
-        self.assertEqual(1, mock_get_corosync.call_count)
-        self.assertEqual(0, mock_push_corosync.call_count)
-        mock_add_net.assert_not_called()
-        mock_client_enable.assert_not_called()
-        mock_client_start.assert_not_called()
+    def fixture_config_http_import_final_cert(self):
+        self.config.http.add_communication(
+            "http.client_import_certificate",
+            [{"label": node} for node in self.cluster_nodes],
+            action="remote/qdevice_net_client_import_certificate",
+            param_list=[
+                ("certificate", self.certs["final_cert"]["b64data"]),
+            ],
+            response_code=200,
+        )
 
-    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-    def test_success(
-        self, mock_client_start, mock_client_enable, mock_add_net,
-        mock_get_corosync, mock_push_corosync
-    ):
-        original_conf = open(rc("corosync-3nodes.conf")).read()
-        mock_get_corosync.return_value = original_conf
-        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+    def fixture_config_http_qdevice_enable(self):
+        self.config.http.add_communication(
+            "http.qdevice_enable",
+            [{"label": node} for node in self.cluster_nodes],
+            action="remote/qdevice_client_enable",
+            response_code=200,
+            output="corosync-qdevice enabled"
+        )
 
-        lib.add_device(
-            lib_env,
-            "net",
-            {"host": "127.0.0.1", "algorithm": "ffsplit"},
-            {"timeout": "12345"}
+    def fixture_config_http_qdevice_start(self):
+        self.config.http.add_communication(
+            "http.qdevice_start",
+            [{"label": node} for node in self.cluster_nodes],
+            action="remote/qdevice_client_start",
+            response_code=200,
+            output="corosync-qdevice started"
         )
 
-        self.assertEqual(1, len(mock_push_corosync.mock_calls))
-        ac(
-            mock_push_corosync.mock_calls[0][1][0].config.export(),
-            original_conf.replace(
-                "provider: corosync_votequorum\n",
-                """provider: corosync_votequorum
+    def fixture_config_success(
+        self, expected_corosync_conf, cert_to_pk12_cert_path
+    ):
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load(filename=self.corosync_conf_name)
+        self.fixture_config_http_get_ca_cert()
+        self.fixture_config_http_client_init()
+        self.fixture_config_runner_get_cert_request()
+        self.fixture_config_http_sign_cert_request()
+        self.fixture_config_runner_cert_to_pk12(cert_to_pk12_cert_path)
+        self.fixture_config_http_import_final_cert()
+        self.fixture_config_http_qdevice_enable()
+        self.config.env.push_corosync_conf(
+            corosync_conf_text=expected_corosync_conf
+        )
+        self.fixture_config_http_qdevice_start()
+
+    def fixture_reports_success(self):
+        return [
+            fixture.info(report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED),
+        ] + [
+            fixture.info(
+                report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+                node=node
+            )
+            for node in self.cluster_nodes
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_ENABLE_STARTED,
+                service="corosync-qdevice"
+            ),
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_ENABLE_SUCCESS,
+                node=node,
+                service="corosync-qdevice"
+            )
+            for node in self.cluster_nodes
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_START_STARTED,
+                service="corosync-qdevice"
+            ),
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_START_SUCCESS,
+                node=node,
+                service="corosync-qdevice"
+            )
+            for node in self.cluster_nodes
+        ]
 
-    device {
-        timeout: 12345
-        model: net
-        votes: 1
+    def test_disabled_on_cman(self):
+        self.config.runner.corosync.version(version="1.4.7")
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.add_device(
+                self.env_assist.get_env(),
+                "net",
+                {"host": "qnetd-host"},
+                {},
+                {}
+            ),
+            [
+                fixture.error(report_codes.CMAN_UNSUPPORTED_COMMAND),
+            ],
+            expected_in_processor=False
+        )
 
-        net {
-            algorithm: ffsplit
-            host: 127.0.0.1
-        }
-    }
-"""
-            )
+    def test_does_not_check_cman_if_not_live(self):
+        (self.config
+            .env.set_corosync_conf_data(open(rc("corosync-3nodes.conf")).read())
         )
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.add_device(
+                self.env_assist.get_env(),
+                "bad model",
+                {},
+                {},
+                {}
+            ),
             [
-                (
-                    severity.INFO,
-                    report_codes.SERVICE_ENABLE_STARTED,
-                    {
-                        "service": "corosync-qdevice",
-                    }
-                ),
-                (
-                    severity.INFO,
-                    report_codes.SERVICE_START_STARTED,
-                    {
-                        "service": "corosync-qdevice",
-                    }
+                fixture.error(
+                    report_codes.INVALID_OPTION_VALUE,
+                    force_code=report_codes.FORCE_QDEVICE_MODEL,
+                    option_name="model",
+                    option_value="bad model",
+                    allowed_values=("net", )
                 ),
             ]
         )
-        self.assertEqual(1, len(mock_add_net.mock_calls))
-        self.assertEqual(3, len(mock_client_enable.mock_calls))
-        self.assertEqual(3, len(mock_client_start.mock_calls))
-
-    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-    def test_success_file(
-        self, mock_client_start, mock_client_enable, mock_add_net,
-        mock_get_corosync, mock_push_corosync
-    ):
-        original_conf = open(rc("corosync-3nodes.conf")).read()
-        mock_get_corosync.return_value = original_conf
-        lib_env = LibraryEnvironment(
-            self.mock_logger,
-            self.mock_reporter,
-            corosync_conf_data=original_conf
-        )
-
-        lib.add_device(
-            lib_env,
-            "net",
-            {"host": "127.0.0.1", "algorithm": "ffsplit"},
-            {"timeout": "12345"}
-        )
 
-        self.assertEqual(1, len(mock_push_corosync.mock_calls))
-        ac(
-            mock_push_corosync.mock_calls[0][1][0].config.export(),
-            original_conf.replace(
-                "provider: corosync_votequorum\n",
-                """provider: corosync_votequorum
+    def test_fail_if_device_already_set(self):
+        corosync_conf = open(
+                rc(self.corosync_conf_name)
+            ).read().replace(
+            "    provider: corosync_votequorum\n",
+            outdent("""\
+                    provider: corosync_votequorum
 
-    device {
-        timeout: 12345
-        model: net
-        votes: 1
+                    device {
+                        model: net
 
-        net {
-            algorithm: ffsplit
-            host: 127.0.0.1
-        }
-    }
-"""
+                        net {
+                            algorithm: ffsplit
+                            host: qnetd-host
+                        }
+                    }
+                """
             )
         )
-        self.assertEqual([], self.mock_reporter.report_item_list)
-        mock_add_net.assert_not_called()
-        mock_client_enable.assert_not_called()
-        mock_client_start.assert_not_called()
 
-    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-    def test_invalid_options(
-        self, mock_client_start, mock_client_enable, mock_add_net,
-        mock_get_corosync, mock_push_corosync
-    ):
-        original_conf = open(rc("corosync-3nodes.conf")).read()
-        mock_get_corosync.return_value = original_conf
-        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load_content(corosync_conf)
 
-        assert_raise_library_error(
+        self.env_assist.assert_raise_library_error(
             lambda: lib.add_device(
-                lib_env,
+                self.env_assist.get_env(),
                 "net",
-                {"host": "127.0.0.1", "algorithm": "ffsplit"},
-                {"bad_option": "bad_value", }
+                {"host": "qnetd-host"},
+                {},
+                {}
             ),
-            (
-                severity.ERROR,
-                report_codes.INVALID_OPTION,
-                {
-                    "option_names": ["bad_option"],
-                    "option_type": "quorum device",
-                    "allowed": ["sync_timeout", "timeout"],
-                },
-                report_codes.FORCE_OPTIONS
+            [
+                fixture.error(report_codes.QDEVICE_ALREADY_DEFINED),
+            ],
+            expected_in_processor=False
+        )
+
+    @mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
+    @mock.patch("pcs.lib.corosync.qdevice_net.write_tmpfile")
+    def test_success_minimal(self, mock_write_tmpfile):
+        tmpfile_instance = mock.MagicMock()
+        tmpfile_instance.name = rc("file.tmp")
+        mock_write_tmpfile.return_value = tmpfile_instance
+
+        expected_corosync_conf = open(
+                rc(self.corosync_conf_name)
+            ).read().replace(
+            "    provider: corosync_votequorum\n",
+            outdent("""\
+                    provider: corosync_votequorum
+
+                    device {
+                        model: net
+                        votes: 1
+
+                        net {
+                            algorithm: ffsplit
+                            host: qnetd-host
+                        }
+                    }
+                """
             )
         )
 
-        self.assertEqual(1, mock_get_corosync.call_count)
-        self.assertEqual(0, mock_push_corosync.call_count)
-        mock_add_net.assert_not_called()
-        mock_client_enable.assert_not_called()
-        mock_client_start.assert_not_called()
-
-    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-    def test_invalid_options_forced(
-        self, mock_client_start, mock_client_enable, mock_add_net,
-        mock_get_corosync, mock_push_corosync
-    ):
-        original_conf = open(rc("corosync-3nodes.conf")).read()
-        mock_get_corosync.return_value = original_conf
-        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.fixture_config_success(
+            expected_corosync_conf,
+            tmpfile_instance.name
+        )
 
         lib.add_device(
-            lib_env,
+            self.env_assist.get_env(),
             "net",
-            {"host": "127.0.0.1", "algorithm": "ffsplit"},
-            {"bad_option": "bad_value", },
-            force_options=True
+            {"host": self.qnetd_host, "algorithm": "ffsplit"},
+            {},
+            {}
         )
 
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
-            [
-                (
-                    severity.WARNING,
-                    report_codes.INVALID_OPTION,
-                    {
-                        "option_names": ["bad_option"],
-                        "option_type": "quorum device",
-                        "allowed": ["sync_timeout", "timeout"],
-                    }
-                ),
-                (
-                    severity.INFO,
-                    report_codes.SERVICE_ENABLE_STARTED,
-                    {
-                        "service": "corosync-qdevice",
-                    }
-                ),
-                (
-                    severity.INFO,
-                    report_codes.SERVICE_START_STARTED,
-                    {
-                        "service": "corosync-qdevice",
-                    }
-                ),
-            ]
+        mock_write_tmpfile.assert_called_once_with(
+            self.certs["signed_request"]["data"],
+            binary=True
         )
-        self.assertEqual(1, mock_get_corosync.call_count)
-        self.assertEqual(1, len(mock_push_corosync.mock_calls))
-        ac(
-            mock_push_corosync.mock_calls[0][1][0].config.export(),
-            original_conf.replace(
-                "provider: corosync_votequorum\n",
-                """provider: corosync_votequorum
+        self.env_assist.assert_reports(self.fixture_reports_success())
 
-    device {
-        bad_option: bad_value
-        model: net
-        votes: 1
+    @mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
+    @mock.patch("pcs.lib.corosync.qdevice_net.write_tmpfile")
+    def test_success_heuristics_no_exec(self, mock_write_tmpfile):
+        tmpfile_instance = mock.MagicMock()
+        tmpfile_instance.name = rc("file.tmp")
+        mock_write_tmpfile.return_value = tmpfile_instance
 
-        net {
-            algorithm: ffsplit
-            host: 127.0.0.1
-        }
-    }
-"""
-            )
-        )
-        self.assertEqual(1, len(mock_add_net.mock_calls))
-        self.assertEqual(3, len(mock_client_enable.mock_calls))
-        self.assertEqual(3, len(mock_client_start.mock_calls))
+        expected_corosync_conf = open(
+                rc(self.corosync_conf_name)
+            ).read().replace(
+            "    provider: corosync_votequorum\n",
+            outdent("""\
+                    provider: corosync_votequorum
 
-    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-    def test_invalid_model(
-        self, mock_client_start, mock_client_enable, mock_add_net,
-        mock_get_corosync, mock_push_corosync
-    ):
-        original_conf = open(rc("corosync-3nodes.conf")).read()
-        mock_get_corosync.return_value = original_conf
-        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+                    device {
+                        model: net
+                        votes: 1
 
-        assert_raise_library_error(
-            lambda: lib.add_device(lib_env, "bad model", {}, {}),
-            (
-                severity.ERROR,
-                report_codes.INVALID_OPTION_VALUE,
-                {
-                    "option_name": "model",
-                    "option_value": "bad model",
-                    "allowed_values": ("net", ),
-                },
-                report_codes.FORCE_QDEVICE_MODEL
+                        net {
+                            algorithm: ffsplit
+                            host: qnetd-host
+                        }
+
+                        heuristics {
+                            mode: on
+                        }
+                    }
+                """
             )
         )
 
-        self.assertEqual(1, mock_get_corosync.call_count)
-        self.assertEqual(0, mock_push_corosync.call_count)
-        mock_add_net.assert_not_called()
-        mock_client_enable.assert_not_called()
-        mock_client_start.assert_not_called()
-
-    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
-    def test_invalid_model_forced(
-        self, mock_client_start, mock_client_enable, mock_add_net,
-        mock_get_corosync, mock_push_corosync
-    ):
-        original_conf = open(rc("corosync-3nodes.conf")).read()
-        mock_get_corosync.return_value = original_conf
-        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.fixture_config_success(
+            expected_corosync_conf,
+            tmpfile_instance.name
+        )
 
-        lib.add_device(lib_env, "bad model", {}, {}, force_model=True)
+        lib.add_device(
+            self.env_assist.get_env(),
+            "net",
+            {"host": self.qnetd_host, "algorithm": "ffsplit"},
+            {},
+            { "mode": "on"}
+        )
 
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
+        mock_write_tmpfile.assert_called_once_with(
+            self.certs["signed_request"]["data"],
+            binary=True
+        )
+        self.env_assist.assert_reports(
+            self.fixture_reports_success()
+            +
             [
-                (
-                    severity.WARNING,
-                    report_codes.INVALID_OPTION_VALUE,
-                    {
-                        "option_name": "model",
-                        "option_value": "bad model",
-                        "allowed_values": ("net", ),
-                    },
-                ),
-                (
-                    severity.INFO,
-                    report_codes.SERVICE_ENABLE_STARTED,
-                    {
-                        "service": "corosync-qdevice",
-                    }
-                ),
-                (
-                    severity.INFO,
-                    report_codes.SERVICE_START_STARTED,
-                    {
-                        "service": "corosync-qdevice",
-                    }
-                ),
+                fixture.warn(
+                    report_codes.COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC
+                )
             ]
         )
-        self.assertEqual(1, mock_get_corosync.call_count)
-        self.assertEqual(1, len(mock_push_corosync.mock_calls))
-        ac(
-            mock_push_corosync.mock_calls[0][1][0].config.export(),
-            original_conf.replace(
-                "provider: corosync_votequorum\n",
-                """provider: corosync_votequorum
 
-    device {
-        model: bad model
-    }
-"""
+    @mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
+    @mock.patch("pcs.lib.corosync.qdevice_net.write_tmpfile")
+    def test_success_full(self, mock_write_tmpfile):
+        tmpfile_instance = mock.MagicMock()
+        tmpfile_instance.name = rc("file.tmp")
+        mock_write_tmpfile.return_value = tmpfile_instance
+
+        expected_corosync_conf = open(
+                rc(self.corosync_conf_name)
+            ).read().replace(
+            "    provider: corosync_votequorum\n",
+            outdent("""\
+                    provider: corosync_votequorum
+
+                    device {
+                        sync_timeout: 34567
+                        timeout: 23456
+                        model: net
+                        votes: 1
+
+                        net {
+                            algorithm: ffsplit
+                            connect_timeout: 12345
+                            force_ip_version: 4
+                            host: qnetd-host
+                            port: 4433
+                            tie_breaker: lowest
+                        }
+
+                        heuristics {
+                            exec_ls: test -f /tmp/test
+                            exec_ping: ping -q -c 1 "127.0.0.1"
+                            interval: 30
+                            mode: on
+                            sync_timeout: 15
+                            timeout: 5
+                        }
+                    }
+                """
             )
         )
-        mock_add_net.assert_not_called() # invalid model - don't setup net model
-        self.assertEqual(3, len(mock_client_enable.mock_calls))
-        self.assertEqual(3, len(mock_client_start.mock_calls))
 
-@skip("TODO: rewrite using new testing fremework")
-@mock.patch(
-    "pcs.lib.commands.quorum.qdevice_net.remote_client_import_certificate_and_key"
-)
- at mock.patch("pcs.lib.commands.quorum.qdevice_net.client_cert_request_to_pk12")
- at mock.patch(
-    "pcs.lib.commands.quorum.qdevice_net.remote_sign_certificate_request"
-)
-@mock.patch(
-    "pcs.lib.commands.quorum.qdevice_net.client_generate_certificate_request"
-)
- at mock.patch("pcs.lib.commands.quorum.qdevice_net.remote_client_setup")
- at mock.patch(
-    "pcs.lib.commands.quorum.qdevice_net.remote_qdevice_get_ca_certificate"
-)
-@mock.patch.object(
-    LibraryEnvironment,
-    "cmd_runner",
-    lambda self: "mock_runner"
-)
-@mock.patch.object(
-    LibraryEnvironment,
-    "node_communicator",
-    lambda self: "mock_communicator"
-)
-class AddDeviceNetTest(TestCase):
-    #pylint: disable=too-many-instance-attributes
-    def setUp(self):
-        self.mock_logger = mock.MagicMock(logging.Logger)
-        self.mock_reporter = MockLibraryReportProcessor()
-        self.lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
-        self.qnetd_host = "qnetd_host"
-        self.cluster_name = "clusterName"
-        self.nodes = NodeAddressesList([
-            NodeAddresses("node1"),
-            NodeAddresses("node2"),
-        ])
-        self.ca_cert = "CA certificate"
-        self.cert_request = "client certificate request"
-        self.signed_cert = "signed certificate"
-        self.final_cert = "final client certificate"
-
-    def test_success(
-        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-    ):
-        mock_get_ca.return_value = self.ca_cert
-        mock_get_cert_request.return_value = self.cert_request
-        mock_sign_cert_request.return_value = self.signed_cert
-        mock_cert_to_pk12.return_value = self.final_cert
-        skip_offline_nodes = False
-
-        lib._add_device_model_net(
-            self.lib_env,
-            self.qnetd_host,
-            self.cluster_name,
-            self.nodes,
-            skip_offline_nodes
+        self.fixture_config_success(
+            expected_corosync_conf,
+            tmpfile_instance.name
         )
 
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
-            [
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-                    {}
-                ),
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-                    {
-                        "node": self.nodes[0].label
-                    }
-                ),
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-                    {
-                        "node": self.nodes[1].label
-                    }
-                ),
-            ]
-        )
-        mock_get_ca.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host
-        )
-        client_setup_calls = [
-            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-        ]
-        self.assertEqual(
-            len(client_setup_calls),
-            len(mock_client_setup.mock_calls)
-        )
-        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-        mock_get_cert_request.assert_called_once_with(
-            "mock_runner",
-            self.cluster_name
-        )
-        mock_sign_cert_request.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host,
-            self.cert_request,
-            self.cluster_name
-        )
-        mock_cert_to_pk12.assert_called_once_with(
-            "mock_runner",
-            self.signed_cert
-        )
-        client_import_calls = [
-            mock.call("mock_communicator", self.nodes[0], self.final_cert),
-            mock.call("mock_communicator", self.nodes[1], self.final_cert),
-        ]
-        self.assertEqual(
-            len(client_import_calls),
-            len(mock_import_cert.mock_calls)
+        lib.add_device(
+            self.env_assist.get_env(),
+            "net",
+            {
+                "host": self.qnetd_host,
+                "port": "4433",
+                "algorithm": "ffsplit",
+                "connect_timeout": "12345",
+                "force_ip_version": "4",
+                "tie_breaker": "lowest",
+            },
+            {
+                "timeout": "23456",
+                "sync_timeout": "34567"
+            },
+            {
+                "mode": "on",
+                "timeout": "5",
+                "sync_timeout": "15",
+                "interval": "30",
+                "exec_ping": 'ping -q -c 1 "127.0.0.1"',
+                "exec_ls": "test -f /tmp/test",
+            }
         )
-        mock_import_cert.assert_has_calls(client_import_calls, any_order=True)
 
-    def test_error_get_ca_cert(
-        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-    ):
-        mock_get_ca.side_effect = NodeCommunicationException(
-            "host", "command", "reason"
+        mock_write_tmpfile.assert_called_once_with(
+            self.certs["signed_request"]["data"],
+            binary=True
         )
-        mock_get_cert_request.return_value = self.cert_request
-        mock_sign_cert_request.return_value = self.signed_cert
-        mock_cert_to_pk12.return_value = self.final_cert
-        skip_offline_nodes = False
 
-        assert_raise_library_error(
-            lambda: lib._add_device_model_net(
-                self.lib_env,
-                self.qnetd_host,
-                self.cluster_name,
-                self.nodes,
-                skip_offline_nodes
-            ),
-            (
-                severity.ERROR,
-                report_codes.NODE_COMMUNICATION_ERROR,
-                {}
+        self.env_assist.assert_reports(self.fixture_reports_success())
+
+    @mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
+    @mock.patch("pcs.lib.corosync.qdevice_net.write_tmpfile")
+    def test_success_one_node_offline(self, mock_write_tmpfile):
+        node_2_offline_msg = (
+            "Failed connect to {0}:2224; No route to host".format(
+                self.cluster_nodes[1]
             )
         )
+        node_2_offline_responses = [
+            {"label": self.cluster_nodes[0]},
+            {
+                "label": self.cluster_nodes[1],
+                "was_connected": False,
+                "errno": 7,
+                "error_msg": node_2_offline_msg,
+            },
+            {"label": self.cluster_nodes[2]},
+        ]
+        def node_2_offline_warning(command):
+            return fixture.warn(
+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+                node=self.cluster_nodes[1],
+                reason=node_2_offline_msg,
+                command=command
+            )
 
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
-            [
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-                    {}
-                )
-            ]
+        tmpfile_instance = mock.MagicMock()
+        tmpfile_instance.name = rc("file.tmp")
+        mock_write_tmpfile.return_value = tmpfile_instance
+
+        expected_corosync_conf = open(
+                rc(self.corosync_conf_name)
+            ).read().replace(
+            "    provider: corosync_votequorum\n",
+            outdent("""\
+                    provider: corosync_votequorum
+
+                    device {
+                        model: net
+                        votes: 1
+
+                        net {
+                            algorithm: ffsplit
+                            host: qnetd-host
+                        }
+                    }
+                """
+            )
         )
-        mock_get_ca.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host
+
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load(filename=self.corosync_conf_name)
+        self.fixture_config_http_get_ca_cert()
+        self.config.http.add_communication(
+            "http.client_init",
+            node_2_offline_responses,
+            action="remote/qdevice_net_client_init_certificate_storage",
+            param_list=[
+                ("ca_certificate", self.certs["cacert"]["b64data"]),
+            ],
+            response_code=200,
+        )
+        self.fixture_config_runner_get_cert_request()
+        self.fixture_config_http_sign_cert_request()
+        self.fixture_config_runner_cert_to_pk12(tmpfile_instance.name)
+        self.config.http.add_communication(
+            "http.client_import_certificate",
+            node_2_offline_responses,
+            action="remote/qdevice_net_client_import_certificate",
+            param_list=[
+                ("certificate", self.certs["final_cert"]["b64data"]),
+            ],
+            response_code=200,
+        )
+        self.config.http.add_communication(
+            "http.qdevice_enable",
+            node_2_offline_responses,
+            action="remote/qdevice_client_enable",
+            response_code=200,
+            output="corosync-qdevice enabled"
+        )
+        self.config.env.push_corosync_conf(
+            corosync_conf_text=expected_corosync_conf
+        )
+        self.config.http.add_communication(
+            "http.qdevice_start",
+            node_2_offline_responses,
+            action="remote/qdevice_client_start",
+            response_code=200,
+            output="corosync-qdevice started"
         )
-        mock_client_setup.assert_not_called()
-        mock_get_cert_request.assert_not_called()
-        mock_sign_cert_request.assert_not_called()
-        mock_cert_to_pk12.assert_not_called()
-        mock_import_cert.assert_not_called()
 
+        lib.add_device(
+            self.env_assist.get_env(),
+            "net",
+            {"host": self.qnetd_host, "algorithm": "ffsplit"},
+            {},
+            {},
+            skip_offline_nodes=True
+        )
 
-    def test_error_client_setup(
-        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-    ):
-        mock_get_ca.return_value = self.ca_cert
-        def raiser(communicator, node, cert):
-            if node == self.nodes[1]:
-                raise NodeCommunicationException("host", "command", "reason")
-        mock_client_setup.side_effect = raiser
-        mock_get_cert_request.return_value = self.cert_request
-        mock_sign_cert_request.return_value = self.signed_cert
-        mock_cert_to_pk12.return_value = self.final_cert
-        skip_offline_nodes = False
+        mock_write_tmpfile.assert_called_once_with(
+            self.certs["signed_request"]["data"],
+            binary=True
+        )
 
-        assert_raise_library_error(
-            lambda: lib._add_device_model_net(
-                self.lib_env,
-                self.qnetd_host,
-                self.cluster_name,
-                self.nodes,
-                skip_offline_nodes
+        self.env_assist.assert_reports([
+            fixture.info(report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED),
+            node_2_offline_warning(
+                "remote/qdevice_net_client_init_certificate_storage"
             ),
-            (
-                severity.ERROR,
-                report_codes.NODE_COMMUNICATION_ERROR,
-                {},
-                report_codes.SKIP_OFFLINE_NODES
+            fixture.info(
+                report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+                node=self.cluster_nodes[0]
+            ),
+            node_2_offline_warning(
+                "remote/qdevice_net_client_import_certificate"
+            ),
+            fixture.info(
+                report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+                node=self.cluster_nodes[2]
+            ),
+            fixture.info(
+                report_codes.SERVICE_ENABLE_STARTED,
+                service="corosync-qdevice"
+            ),
+            fixture.info(
+                report_codes.SERVICE_ENABLE_SUCCESS,
+                node=self.cluster_nodes[0],
+                service="corosync-qdevice"
+            ),
+            node_2_offline_warning("remote/qdevice_client_enable"),
+            fixture.info(
+                report_codes.SERVICE_ENABLE_SUCCESS,
+                node=self.cluster_nodes[2],
+                service="corosync-qdevice"
+            ),
+            fixture.info(
+                report_codes.SERVICE_START_STARTED,
+                service="corosync-qdevice"
+            ),
+            fixture.info(
+                report_codes.SERVICE_START_SUCCESS,
+                node=self.cluster_nodes[0],
+                service="corosync-qdevice"
+            ),
+            node_2_offline_warning("remote/qdevice_client_start"),
+            fixture.info(
+                report_codes.SERVICE_START_SUCCESS,
+                node=self.cluster_nodes[2],
+                service="corosync-qdevice"
+            ),
+        ])
+
+    def test_success_file_minimal(self):
+        original_corosync_conf = open(rc(self.corosync_conf_name)).read()
+        expected_corosync_conf = original_corosync_conf.replace(
+            "    provider: corosync_votequorum\n",
+            outdent("""\
+                    provider: corosync_votequorum
+
+                    device {
+                        model: net
+                        votes: 1
+
+                        net {
+                            algorithm: ffsplit
+                            host: qnetd-host
+                        }
+                    }
+                """
             )
         )
 
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
-            [
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-                    {}
-                ),
-                (
-                    severity.ERROR,
-                    report_codes.NODE_COMMUNICATION_ERROR,
-                    {},
-                    report_codes.SKIP_OFFLINE_NODES
-                ),
-            ]
+        (self.config
+            .env.set_corosync_conf_data(original_corosync_conf)
+            .env.push_corosync_conf(
+                corosync_conf_text=expected_corosync_conf
+            )
         )
-        mock_get_ca.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host
+
+        lib.add_device(
+            self.env_assist.get_env(),
+            "net",
+            {"host": "qnetd-host", "algorithm": "ffsplit"},
+            {},
+            {}
+        )
+
+    def test_success_file_full(self):
+        expected_corosync_conf = open(
+                rc(self.corosync_conf_name)
+            ).read().replace(
+            "    provider: corosync_votequorum\n",
+            outdent("""\
+                    provider: corosync_votequorum
+
+                    device {
+                        sync_timeout: 34567
+                        timeout: 23456
+                        model: net
+                        votes: 1
+
+                        net {
+                            algorithm: ffsplit
+                            connect_timeout: 12345
+                            force_ip_version: 4
+                            host: qnetd-host
+                            port: 4433
+                            tie_breaker: lowest
+                        }
+
+                        heuristics {
+                            exec_ls: test -f /tmp/test
+                            exec_ping: ping -q -c 1 "127.0.0.1"
+                            interval: 30
+                            mode: on
+                            sync_timeout: 15
+                            timeout: 5
+                        }
+                    }
+                """
+            )
         )
-        client_setup_calls = [
-            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-        ]
-        self.assertEqual(
-            len(client_setup_calls),
-            len(mock_client_setup.mock_calls)
+
+        (self.config
+            .env.set_corosync_conf_data(
+                open(rc(self.corosync_conf_name)).read()
+            )
+            .env.push_corosync_conf(
+                corosync_conf_text=expected_corosync_conf
+            )
         )
-        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
 
-    def test_error_client_setup_skip_offline(
-        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-    ):
-        mock_get_ca.return_value = self.ca_cert
-        def raiser(communicator, node, cert):
-            if node == self.nodes[1]:
-                raise NodeCommunicationException("host", "command", "reason")
-        mock_client_setup.side_effect = raiser
-        mock_get_cert_request.return_value = self.cert_request
-        mock_sign_cert_request.return_value = self.signed_cert
-        mock_cert_to_pk12.return_value = self.final_cert
-        skip_offline_nodes = True
+        lib.add_device(
+            self.env_assist.get_env(),
+            "net",
+            {
+                "host": self.qnetd_host,
+                "port": "4433",
+                "algorithm": "ffsplit",
+                "connect_timeout": "12345",
+                "force_ip_version": "4",
+                "tie_breaker": "lowest",
+            },
+            {
+                "timeout": "23456",
+                "sync_timeout": "34567"
+            },
+            {
+                "mode": "on",
+                "timeout": "5",
+                "sync_timeout": "15",
+                "interval": "30",
+                "exec_ping": 'ping -q -c 1 "127.0.0.1"',
+                "exec_ls": "test -f /tmp/test",
+            }
+        )
 
-        lib._add_device_model_net(
-            self.lib_env,
-            self.qnetd_host,
-            self.cluster_name,
-            self.nodes,
-            skip_offline_nodes
+    def test_invalid_options(self):
+        (self.config
+            .runner.corosync.version()
+            .corosync_conf.load(filename=self.corosync_conf_name)
         )
 
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.add_device(
+                self.env_assist.get_env(),
+                "net",
+                {"host": "qnetd-host", "algorithm": "ffsplit"},
+                {"bad_option": "bad_value"},
+                {"mode": "bad-mode", "bad_heur": "abc", "exec_bad.name": ""}
+            ),
             [
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-                    {}
+                fixture.error(
+                    report_codes.INVALID_OPTION,
+                    force_code=report_codes.FORCE_OPTIONS,
+                    option_names=["bad_option"],
+                    option_type="quorum device",
+                    allowed=["sync_timeout", "timeout"],
+                    allowed_patterns=[]
                 ),
-                (
-                    severity.WARNING,
-                    report_codes.NODE_COMMUNICATION_ERROR,
-                    {}
+                fixture.error(
+                    report_codes.INVALID_OPTION_VALUE,
+                    force_code=report_codes.FORCE_OPTIONS,
+                    option_name="mode",
+                    option_value="bad-mode",
+                    allowed_values=("off", "on", "sync")
                 ),
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-                    {
-                        "node": self.nodes[0].label
-                    }
+                fixture.error(
+                    report_codes.INVALID_OPTION,
+                    force_code=report_codes.FORCE_OPTIONS,
+                    option_names=["bad_heur"],
+                    option_type="heuristics",
+                    allowed=["interval", "mode", "sync_timeout", "timeout"],
+                    allowed_patterns=["exec_NAME"]
                 ),
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-                    {
-                        "node": self.nodes[1].label
-                    }
+                fixture.error(
+                    report_codes.INVALID_USERDEFINED_OPTIONS,
+                    option_names=["exec_bad.name"],
+                    option_type="heuristics",
+                    allowed_description=(
+                        "exec_NAME cannot contain '.:{}#' and whitespace "
+                        "characters"
+                    )
                 ),
             ]
         )
-        mock_get_ca.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host
+
+    @mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
+    @mock.patch("pcs.lib.corosync.qdevice_net.write_tmpfile")
+    def test_invalid_options_forced(self, mock_write_tmpfile):
+        tmpfile_instance = mock.MagicMock()
+        tmpfile_instance.name = rc("file.tmp")
+        mock_write_tmpfile.return_value = tmpfile_instance
+
+        expected_corosync_conf = open(
+                rc(self.corosync_conf_name)
+            ).read().replace(
+            "    provider: corosync_votequorum\n",
+            outdent("""\
+                    provider: corosync_votequorum
+
+                    device {
+                        bad_option: bad_value
+                        model: net
+                        votes: 1
+
+                        net {
+                            algorithm: ffsplit
+                            host: qnetd-host
+                        }
+
+                        heuristics {
+                            bad_heur: abc
+                            mode: bad-mode
+                        }
+                    }
+                """
+            )
         )
-        client_setup_calls = [
-            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-        ]
-        self.assertEqual(
-            len(client_setup_calls),
-            len(mock_client_setup.mock_calls)
+
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load(filename=self.corosync_conf_name)
+        self.fixture_config_http_get_ca_cert()
+        self.fixture_config_http_client_init()
+        self.fixture_config_runner_get_cert_request()
+        self.fixture_config_http_sign_cert_request()
+        self.fixture_config_runner_cert_to_pk12(tmpfile_instance.name)
+        self.fixture_config_http_import_final_cert()
+        self.fixture_config_http_qdevice_enable()
+        self.config.env.push_corosync_conf(
+            corosync_conf_text=expected_corosync_conf
+        )
+        self.fixture_config_http_qdevice_start()
+
+        lib.add_device(
+            self.env_assist.get_env(),
+            "net",
+            {"host": "qnetd-host", "algorithm": "ffsplit"},
+            {"bad_option": "bad_value"},
+            {"mode": "bad-mode", "bad_heur": "abc",},
+            force_options=True
         )
-        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-
-    def test_generate_cert_request_error(
-        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-    ):
-        mock_get_ca.return_value = self.ca_cert
-        mock_get_cert_request.side_effect = LibraryError()
-        mock_sign_cert_request.return_value = self.signed_cert
-        mock_cert_to_pk12.return_value = self.final_cert
-        skip_offline_nodes = False
 
-        self.assertRaises(
-            LibraryError,
-            lambda: lib._add_device_model_net(
-                self.lib_env,
-                self.qnetd_host,
-                self.cluster_name,
-                self.nodes,
-                skip_offline_nodes
+        self.env_assist.assert_reports([
+            fixture.warn(
+                report_codes.INVALID_OPTION,
+                option_names=["bad_option"],
+                option_type="quorum device",
+                allowed=["sync_timeout", "timeout"],
+                allowed_patterns=[]
+            ),
+            fixture.warn(
+                report_codes.INVALID_OPTION_VALUE,
+                option_name="mode",
+                option_value="bad-mode",
+                allowed_values=("off", "on", "sync")
+            ),
+            fixture.warn(
+                report_codes.INVALID_OPTION,
+                option_names=["bad_heur"],
+                option_type="heuristics",
+                allowed=["interval", "mode", "sync_timeout", "timeout"],
+                allowed_patterns=["exec_NAME"]
+            ),
+            fixture.info(report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED),
+        ] + [
+            fixture.info(
+                report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+                node=node
+            )
+            for node in self.cluster_nodes
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_ENABLE_STARTED,
+                service="corosync-qdevice"
+            ),
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_ENABLE_SUCCESS,
+                node=node,
+                service="corosync-qdevice"
             )
+            for node in self.cluster_nodes
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_START_STARTED,
+                service="corosync-qdevice"
+            ),
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_START_SUCCESS,
+                node=node,
+                service="corosync-qdevice"
+            )
+            for node in self.cluster_nodes
+        ])
+
+    def test_invalid_model(self):
+        (self.config
+            .runner.corosync.version()
+            .corosync_conf.load(filename=self.corosync_conf_name)
         )
 
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.add_device(
+                self.env_assist.get_env(),
+                "bad_model",
+                {},
+                {},
+                {}
+            ),
             [
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-                    {}
-                )
+                fixture.error(
+                    report_codes.INVALID_OPTION_VALUE,
+                    force_code=report_codes.FORCE_QDEVICE_MODEL,
+                    option_name="model",
+                    option_value="bad_model",
+                    allowed_values=("net", ),
+                ),
             ]
         )
-        mock_get_ca.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host
-        )
-        client_setup_calls = [
-            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-        ]
-        self.assertEqual(
-            len(client_setup_calls),
-            len(mock_client_setup.mock_calls)
+
+    def test_invalid_model_forced(self):
+        expected_corosync_conf = open(
+                rc(self.corosync_conf_name)
+            ).read().replace(
+            "    provider: corosync_votequorum\n",
+            outdent("""\
+                    provider: corosync_votequorum
+
+                    device {
+                        model: bad_model
+                    }
+                """
+            )
         )
-        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-        mock_get_cert_request.assert_called_once_with(
-            "mock_runner",
-            self.cluster_name
+
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load(filename=self.corosync_conf_name)
+        # model is not "net" - do not set up certificates
+        self.fixture_config_http_qdevice_enable()
+        self.config.env.push_corosync_conf(
+            corosync_conf_text=expected_corosync_conf
         )
-        mock_sign_cert_request.assert_not_called()
-        mock_cert_to_pk12.assert_not_called()
-        mock_import_cert.assert_not_called()
+        self.fixture_config_http_qdevice_start()
 
-    def test_sign_certificate_error(
-        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-    ):
-        mock_get_ca.return_value = self.ca_cert
-        mock_get_cert_request.return_value = self.cert_request
-        mock_sign_cert_request.side_effect = NodeCommunicationException(
-            "host", "command", "reason"
+        lib.add_device(
+            self.env_assist.get_env(),
+            "bad_model",
+            {},
+            {},
+            {},
+            force_model=True
         )
-        mock_cert_to_pk12.return_value = self.final_cert
-        skip_offline_nodes = False
 
-        assert_raise_library_error(
-            lambda: lib._add_device_model_net(
-                self.lib_env,
-                self.qnetd_host,
-                self.cluster_name,
-                self.nodes,
-                skip_offline_nodes
+        self.env_assist.assert_reports([
+            fixture.warn(
+                report_codes.INVALID_OPTION_VALUE,
+                option_name="model",
+                option_value="bad_model",
+                allowed_values=("net", ),
             ),
-            (
-                severity.ERROR,
-                report_codes.NODE_COMMUNICATION_ERROR,
-                {}
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_ENABLE_STARTED,
+                service="corosync-qdevice"
+            ),
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_ENABLE_SUCCESS,
+                node=node,
+                service="corosync-qdevice"
             )
-        )
+            for node in self.cluster_nodes
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_START_STARTED,
+                service="corosync-qdevice"
+            ),
+        ] + [
+            fixture.info(
+                report_codes.SERVICE_START_SUCCESS,
+                node=node,
+                service="corosync-qdevice"
+            )
+            for node in self.cluster_nodes
+        ])
 
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
+    def test_error_get_ca_cert(self):
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load(filename=self.corosync_conf_name)
+        self.config.http.add_communication(
+            "http.get_ca_certificate",
             [
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-                    {}
-                )
-            ]
+                {"label": self.qnetd_host, },
+            ],
+            action="remote/qdevice_net_get_ca_certificate",
+            response_code=400,
+            output="Unable to read certificate: error description"
         )
-        mock_get_ca.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host
-        )
-        client_setup_calls = [
-            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-        ]
-        self.assertEqual(
-            len(client_setup_calls),
-            len(mock_client_setup.mock_calls)
-        )
-        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-        mock_get_cert_request.assert_called_once_with(
-            "mock_runner",
-            self.cluster_name
-        )
-        mock_sign_cert_request.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host,
-            self.cert_request,
-            self.cluster_name
-        )
-        mock_cert_to_pk12.assert_not_called()
-        mock_import_cert.assert_not_called()
-
-    def test_certificate_to_pk12_error(
-        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-    ):
-        mock_get_ca.return_value = self.ca_cert
-        mock_get_cert_request.return_value = self.cert_request
-        mock_sign_cert_request.return_value = self.signed_cert
-        mock_cert_to_pk12.side_effect = LibraryError()
-        skip_offline_nodes = False
 
-        self.assertRaises(
-            LibraryError,
-            lambda: lib._add_device_model_net(
-                self.lib_env,
-                self.qnetd_host,
-                self.cluster_name,
-                self.nodes,
-                skip_offline_nodes
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.add_device(
+                self.env_assist.get_env(),
+                "net",
+                {"host": "qnetd-host", "algorithm": "ffsplit"},
+                {"timeout": "20"},
+                {},
+                skip_offline_nodes=True # test that this does not matter
+            ),
+            [], # an empty LibraryError is raised
+            expected_in_processor=False
+        )
+
+        self.env_assist.assert_reports([
+            fixture.info(report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED),
+            fixture.error(
+                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+                force_code=None,
+                node=self.qnetd_host,
+                command="remote/qdevice_net_get_ca_certificate",
+                reason="Unable to read certificate: error description",
             )
-        )
+        ])
 
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
+    def test_error_client_setup(self):
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load(filename=self.corosync_conf_name)
+        self.fixture_config_http_get_ca_cert()
+        self.config.http.add_communication(
+            "http.client_init",
             [
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-                    {}
-                )
-            ]
-        )
-        mock_get_ca.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host
-        )
-        client_setup_calls = [
-            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-        ]
-        self.assertEqual(
-            len(client_setup_calls),
-            len(mock_client_setup.mock_calls)
-        )
-        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-        mock_get_cert_request.assert_called_once_with(
-            "mock_runner",
-            self.cluster_name
-        )
-        mock_sign_cert_request.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host,
-            self.cert_request,
-            self.cluster_name
-        )
-        mock_cert_to_pk12.assert_called_once_with(
-            "mock_runner",
-            self.signed_cert
+                {"label": self.cluster_nodes[0]},
+                {
+                    "label": self.cluster_nodes[1],
+                    "response_code": 400,
+                    "output": "some error occurred",
+                },
+                {"label": self.cluster_nodes[2]},
+            ],
+            action="remote/qdevice_net_client_init_certificate_storage",
+            param_list=[
+                ("ca_certificate", self.certs["cacert"]["b64data"]),
+            ],
+            response_code=200,
         )
-        mock_import_cert.assert_not_called()
-
-    def test_client_import_cert_error(
-        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-    ):
-        mock_get_ca.return_value = self.ca_cert
-        mock_get_cert_request.return_value = self.cert_request
-        mock_sign_cert_request.return_value = self.signed_cert
-        mock_cert_to_pk12.return_value = self.final_cert
-        def raiser(communicator, node, cert):
-            if node == self.nodes[1]:
-                raise NodeCommunicationException("host", "command", "reason")
-        mock_import_cert.side_effect = raiser
-        skip_offline_nodes = False
 
-        assert_raise_library_error(
-            lambda: lib._add_device_model_net(
-                self.lib_env,
-                self.qnetd_host,
-                self.cluster_name,
-                self.nodes,
-                skip_offline_nodes
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.add_device(
+                self.env_assist.get_env(),
+                "net",
+                {"host": "qnetd-host", "algorithm": "ffsplit"},
+                {"timeout": "20"},
+                {}
             ),
-            (
-                severity.ERROR,
-                report_codes.NODE_COMMUNICATION_ERROR,
-                {},
-                report_codes.SKIP_OFFLINE_NODES
+            [], # an empty LibraryError is raised
+            expected_in_processor=False
+        )
+
+        self.env_assist.assert_reports([
+            fixture.info(report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED),
+            fixture.error(
+                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+                force_code=report_codes.SKIP_OFFLINE_NODES,
+                node=self.cluster_nodes[1],
+                command="remote/qdevice_net_client_init_certificate_storage",
+                reason="some error occurred",
             )
+        ])
+
+    @mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
+    def test_generate_cert_request_error(self):
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load(filename=self.corosync_conf_name)
+        self.fixture_config_http_get_ca_cert()
+        self.fixture_config_http_client_init()
+        self.config.runner.place(
+            "corosync-qdevice-net-certutil -r -n {cluster_name}".format(
+                cluster_name=self.cluster_name
+            ),
+            name="runner.corosync.qdevice.cert-request",
+            stderr="some error occurred",
+            returncode=1
         )
 
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.add_device(
+                self.env_assist.get_env(),
+                "net",
+                {"host": "qnetd-host", "algorithm": "ffsplit"},
+                {"timeout": "20"},
+                {}
+            ),
             [
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-                    {}
+                fixture.error(
+                    report_codes.QDEVICE_INITIALIZATION_ERROR,
+                    force_code=None,
+                    model="net",
+                    reason="some error occurred",
                 ),
+            ],
+            expected_in_processor=False
+        )
+
+        self.env_assist.assert_reports([
+            fixture.info(report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED),
+        ])
+
+    @mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
+    def test_sign_certificate_error(self):
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load(filename=self.corosync_conf_name)
+        self.fixture_config_http_get_ca_cert()
+        self.fixture_config_http_client_init()
+        self.fixture_config_runner_get_cert_request()
+        self.config.http.add_communication(
+            "http.sign_certificate_request",
+            [
+                {"label": self.qnetd_host, },
+            ],
+            action="remote/qdevice_net_sign_node_certificate",
+            param_list=[
                 (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-                    {
-                        "node": self.nodes[0].label
-                    }
+                    "certificate_request",
+                    self.certs["cert_request"]["b64data"]
                 ),
-                (
-                    severity.ERROR,
-                    report_codes.NODE_COMMUNICATION_ERROR,
-                    {},
-                    report_codes.SKIP_OFFLINE_NODES
+                ("cluster_name", self.cluster_name),
+            ],
+            response_code=400,
+            output="some error occurred"
+        )
+
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.add_device(
+                self.env_assist.get_env(),
+                "net",
+                {"host": "qnetd-host", "algorithm": "ffsplit"},
+                {"timeout": "20"},
+                {}
+            ),
+            [], # an empty LibraryError is raised
+            expected_in_processor=False
+        )
+
+        self.env_assist.assert_reports([
+            fixture.info(report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED),
+            fixture.error(
+                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+                force_code=None,
+                node=self.qnetd_host,
+                command="remote/qdevice_net_sign_node_certificate",
+                reason="some error occurred",
+            )
+        ])
+
+    @mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
+    @mock.patch("pcs.lib.corosync.qdevice_net.write_tmpfile")
+    def test_certificate_to_pk12_error(self, mock_write_tmpfile):
+        tmpfile_instance = mock.MagicMock()
+        tmpfile_instance.name = rc("file.tmp")
+        mock_write_tmpfile.return_value = tmpfile_instance
+
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load(filename=self.corosync_conf_name)
+        self.fixture_config_http_get_ca_cert()
+        self.fixture_config_http_client_init()
+        self.fixture_config_runner_get_cert_request()
+        self.fixture_config_http_sign_cert_request()
+        self.config.runner.place(
+            "corosync-qdevice-net-certutil -M -c {file_path}".format(
+                file_path=tmpfile_instance.name
+            ),
+            name="runner.corosync.qdevice.cert-to-pk12",
+            stderr="some error occurred",
+            returncode=1
+        )
+
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.add_device(
+                self.env_assist.get_env(),
+                "net",
+                {"host": "qnetd-host", "algorithm": "ffsplit"},
+                {"timeout": "20"},
+                {}
+            ),
+            [
+                fixture.error(
+                    report_codes.QDEVICE_CERTIFICATE_IMPORT_ERROR,
+                    force_code=None,
+                    reason="some error occurred",
                 ),
-            ]
+            ],
+            expected_in_processor=False
         )
-        mock_get_ca.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host
+
+        self.env_assist.assert_reports([
+            fixture.info(report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED),
+        ])
+
+    @mock.patch("pcs.lib.corosync.qdevice_net.client_initialized", lambda: True)
+    @mock.patch("pcs.lib.corosync.qdevice_net.write_tmpfile")
+    def test_client_import_cert_error(self, mock_write_tmpfile):
+        tmpfile_instance = mock.MagicMock()
+        tmpfile_instance.name = rc("file.tmp")
+        mock_write_tmpfile.return_value = tmpfile_instance
+
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load(filename=self.corosync_conf_name)
+        self.fixture_config_http_get_ca_cert()
+        self.fixture_config_http_client_init()
+        self.fixture_config_runner_get_cert_request()
+        self.fixture_config_http_sign_cert_request()
+        self.fixture_config_runner_cert_to_pk12(tmpfile_instance.name)
+        self.config.http.add_communication(
+            "http.client_import_certificate",
+            [
+                {"label": self.cluster_nodes[0]},
+                {
+                    "label": self.cluster_nodes[1],
+                    "response_code": 400,
+                    "output": "some error occurred",
+                },
+                {"label": self.cluster_nodes[2]},
+            ],
+            action="remote/qdevice_net_client_import_certificate",
+            param_list=[
+                ("certificate", self.certs["final_cert"]["b64data"]),
+            ],
+            response_code=200,
         )
-        client_setup_calls = [
-            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-        ]
-        self.assertEqual(
-            len(client_setup_calls),
-            len(mock_client_setup.mock_calls)
-        )
-        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-        mock_get_cert_request.assert_called_once_with(
-            "mock_runner",
-            self.cluster_name
-        )
-        mock_sign_cert_request.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host,
-            self.cert_request,
-            self.cluster_name
-        )
-        mock_cert_to_pk12.assert_called_once_with(
-            "mock_runner",
-            self.signed_cert
-        )
-        client_import_calls = [
-            mock.call("mock_communicator", self.nodes[0], self.final_cert),
-            mock.call("mock_communicator", self.nodes[1], self.final_cert),
-        ]
-        self.assertEqual(
-            len(client_import_calls),
-            len(mock_import_cert.mock_calls)
+
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.add_device(
+                self.env_assist.get_env(),
+                "net",
+                {"host": "qnetd-host", "algorithm": "ffsplit"},
+                {"timeout": "20"},
+                {}
+            ),
+            [], # an empty LibraryError is raised
+            expected_in_processor=False
         )
-        mock_import_cert.assert_has_calls(client_import_calls, any_order=True)
 
-    def test_client_import_cert_error_skip_offline(
-        self, mock_get_ca, mock_client_setup, mock_get_cert_request,
-        mock_sign_cert_request, mock_cert_to_pk12, mock_import_cert
-    ):
-        mock_get_ca.return_value = self.ca_cert
-        mock_get_cert_request.return_value = self.cert_request
-        mock_sign_cert_request.return_value = self.signed_cert
-        mock_cert_to_pk12.return_value = self.final_cert
-        def raiser(communicator, node, cert):
-            if node == self.nodes[1]:
-                raise NodeCommunicationException("host", "command", "reason")
-        mock_import_cert.side_effect = raiser
-        skip_offline_nodes = True
+        self.env_assist.assert_reports([
+            fixture.info(report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED),
+            fixture.info(
+                report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+                node=self.cluster_nodes[0],
+            ),
+            fixture.error(
+                report_codes.NODE_COMMUNICATION_COMMAND_UNSUCCESSFUL,
+                force_code=report_codes.SKIP_OFFLINE_NODES,
+                node=self.cluster_nodes[1],
+                command="remote/qdevice_net_client_import_certificate",
+                reason="some error occurred",
+            ),
+            fixture.info(
+                report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
+                node=self.cluster_nodes[2],
+            ),
+        ])
 
-        lib._add_device_model_net(
-            self.lib_env,
-            self.qnetd_host,
-            self.cluster_name,
-            self.nodes,
-            skip_offline_nodes
+
+class RemoveDeviceHeuristics(TestCase):
+    def setUp(self):
+        self.env_assist, self.config = get_env_tools(self)
+
+    def test_disabled_on_cman(self):
+        self.config.runner.corosync.version(version="1.4.7")
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.remove_device_heuristics(self.env_assist.get_env()),
+            [
+                fixture.error(report_codes.CMAN_UNSUPPORTED_COMMAND),
+            ],
+            expected_in_processor=False
         )
 
-        assert_report_item_list_equal(
-            self.mock_reporter.report_item_list,
+    def test_enabled_on_cman_if_not_live(self):
+        (self.config
+            .env.set_corosync_conf_data(open(rc("corosync-3nodes.conf")).read())
+        )
+        self.env_assist.assert_raise_library_error(
+            lambda: lib.remove_device_heuristics(self.env_assist.get_env()),
             [
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_DISTRIBUTION_STARTED,
-                    {}
-                ),
-                (
-                    severity.INFO,
-                    report_codes.QDEVICE_CERTIFICATE_ACCEPTED_BY_NODE,
-                    {
-                        "node": self.nodes[0].label
-                    }
-                ),
-                (
-                    severity.WARNING,
-                    report_codes.NODE_COMMUNICATION_ERROR,
-                    {}
-                ),
-            ]
+                fixture.error(report_codes.QDEVICE_NOT_DEFINED),
+            ],
+            expected_in_processor=False
         )
-        mock_get_ca.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host
+
+    def test_success(self):
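+        # Build two fixtures: the stock 3-node qdevice config and the same
+        # config with a heuristics block spliced in after the net section;
+        # removing heuristics should push the former back.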
+        config_no_heuristics = open(rc("corosync-3nodes-qdevice.conf")).read()
+        config_heuristics = config_no_heuristics.replace(
+            outdent("""\
+                    net {
+                        host: 127.0.0.1
+                    }
+            """),
+            outdent("""\
+                    net {
+                        host: 127.0.0.1
+                    }
+
+                    heuristics {
+                        mode: on
+                        exec_ls: test -f /tmp/test
+                    }
+            """)
         )
-        client_setup_calls = [
-            mock.call("mock_communicator", self.nodes[0], self.ca_cert),
-            mock.call("mock_communicator", self.nodes[1], self.ca_cert),
-        ]
-        self.assertEqual(
-            len(client_setup_calls),
-            len(mock_client_setup.mock_calls)
-        )
-        mock_client_setup.assert_has_calls(client_setup_calls, any_order=True)
-        mock_get_cert_request.assert_called_once_with(
-            "mock_runner",
-            self.cluster_name
-        )
-        mock_sign_cert_request.assert_called_once_with(
-            "mock_communicator",
-            self.qnetd_host,
-            self.cert_request,
-            self.cluster_name
-        )
-        mock_cert_to_pk12.assert_called_once_with(
-            "mock_runner",
-            self.signed_cert
-        )
-        client_import_calls = [
-            mock.call("mock_communicator", self.nodes[0], self.final_cert),
-            mock.call("mock_communicator", self.nodes[1], self.final_cert),
-        ]
-        self.assertEqual(
-            len(client_import_calls),
-            len(mock_import_cert.mock_calls)
+
+        self.config.runner.corosync.version()
+        self.config.corosync_conf.load_content(config_heuristics)
+        self.config.env.push_corosync_conf(
+            corosync_conf_text=config_no_heuristics
         )
-        mock_import_cert.assert_has_calls(client_import_calls, any_order=True)
+
+        lib.remove_device_heuristics(self.env_assist.get_env())
+
 
 
 @skip("TODO: rewrite using new testing fremework")
@@ -1973,7 +2309,7 @@ class UpdateDeviceTest(TestCase, CmanMixin):
     def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync):
         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
         self.assert_disabled_on_cman(
-            lambda: lib.update_device(lib_env, {"host": "127.0.0.1"}, {})
+            lambda: lib.update_device(lib_env, {"host": "127.0.0.1"}, {}, {})
         )
         mock_get_corosync.assert_not_called()
         mock_push_corosync.assert_not_called()
@@ -1991,7 +2327,7 @@ class UpdateDeviceTest(TestCase, CmanMixin):
         )
 
         assert_raise_library_error(
-            lambda: lib.update_device(lib_env, {"host": "127.0.0.1"}, {}),
+            lambda: lib.update_device(lib_env, {"host": "127.0.0.1"}, {}, {}),
             (
                 severity.ERROR,
                 report_codes.QDEVICE_NOT_DEFINED,
@@ -2006,7 +2342,7 @@ class UpdateDeviceTest(TestCase, CmanMixin):
         lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
 
         assert_raise_library_error(
-            lambda: lib.update_device(lib_env, {"host": "127.0.0.1"}, {}),
+            lambda: lib.update_device(lib_env, {"host": "127.0.0.1"}, {}, {}),
             (
                 severity.ERROR,
                 report_codes.QDEVICE_NOT_DEFINED,
@@ -2025,14 +2361,25 @@ class UpdateDeviceTest(TestCase, CmanMixin):
         lib.update_device(
             lib_env,
             {"host": "127.0.0.2"},
-            {"timeout": "12345"}
+            {"timeout": "12345"},
+            {"mode": "on", "exec_ls": "test -f /tmp/test"}
         )
 
         self.assertEqual(1, len(mock_push_corosync.mock_calls))
         ac(
             mock_push_corosync.mock_calls[0][1][0].config.export(),
             original_conf
-                .replace("host: 127.0.0.1", "host: 127.0.0.2")
+                .replace(
+                    "            host: 127.0.0.1\n",
+                    outdent("""\
+                                host: 127.0.0.2
+                            }
+
+                            heuristics {
+                                exec_ls: test -f /tmp/test
+                                mode: on
+                    """)
+                )
                 .replace(
                     "model: net",
                     "model: net\n        timeout: 12345"
@@ -2041,6 +2388,40 @@ class UpdateDeviceTest(TestCase, CmanMixin):
         self.assertEqual([], self.mock_reporter.report_item_list)
 
     @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_success_heuristics_no_exec(
+        self, mock_get_corosync, mock_push_corosync
+    ):
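+        # Setting heuristics mode to "on" without any exec_* option still
+        # updates the config but emits a warning.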
+        original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        lib.update_device(lib_env, {}, {}, {"mode": "on"})
+
+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
+        ac(
+            mock_push_corosync.mock_calls[0][1][0].config.export(),
+            original_conf
+                .replace(
+                    "            host: 127.0.0.1\n",
+                    outdent("""\
+                                host: 127.0.0.1
+                            }
+
+                            heuristics {
+                                mode: on
+                    """)
+                )
+        )
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                fixture.warn(
+                    report_codes.COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC
+                )
+            ]
+        )
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
     def test_invalid_options(self, mock_get_corosync, mock_push_corosync):
         original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
         mock_get_corosync.return_value = original_conf
@@ -2050,7 +2431,8 @@ class UpdateDeviceTest(TestCase, CmanMixin):
             lambda: lib.update_device(
                 lib_env,
                 {},
-                {"bad_option": "bad_value", }
+                {"bad_option": "bad_value", },
+                {"mode": "bad mode"}
             ),
             (
                 severity.ERROR,
@@ -2059,9 +2441,17 @@ class UpdateDeviceTest(TestCase, CmanMixin):
                     "option_names": ["bad_option"],
                     "option_type": "quorum device",
                     "allowed": ["sync_timeout", "timeout"],
+                    "allowed_patterns": [],
                 },
                 report_codes.FORCE_OPTIONS
-            )
+            ),
+            fixture.error(
+                report_codes.INVALID_OPTION_VALUE,
+                force_code=report_codes.FORCE_OPTIONS,
+                option_name="mode",
+                option_value="bad mode",
+                allowed_values=("off", "on", "sync")
+            ),
         )
 
         self.assertEqual(1, mock_get_corosync.call_count)
@@ -2077,6 +2467,7 @@ class UpdateDeviceTest(TestCase, CmanMixin):
             lib_env,
             {},
             {"bad_option": "bad_value", },
+            {"mode": "bad mode"},
             force_options=True
         )
 
@@ -2090,8 +2481,15 @@ class UpdateDeviceTest(TestCase, CmanMixin):
                         "option_names": ["bad_option"],
                         "option_type": "quorum device",
                         "allowed": ["sync_timeout", "timeout"],
+                        "allowed_patterns": [],
                     }
-                )
+                ),
+                fixture.warn(
+                    report_codes.INVALID_OPTION_VALUE,
+                    option_name="mode",
+                    option_value="bad mode",
+                    allowed_values=("off", "on", "sync")
+                ),
             ]
         )
         self.assertEqual(1, mock_get_corosync.call_count)
@@ -2099,8 +2497,23 @@ class UpdateDeviceTest(TestCase, CmanMixin):
         ac(
             mock_push_corosync.mock_calls[0][1][0].config.export(),
             original_conf.replace(
-                "model: net",
-                "model: net\n        bad_option: bad_value"
+                outdent("""\
+
+                        net {
+                            host: 127.0.0.1
+                        }
+                """),
+                outdent("""\
+                        bad_option: bad_value
+
+                        net {
+                            host: 127.0.0.1
+                        }
+
+                        heuristics {
+                            mode: bad mode
+                        }
+                """)
             )
         )
 
diff --git a/pcs/test/test_lib_commands_sbd.py b/pcs/test/test_lib_commands_sbd.py
index 18b0d91..cc3a8d4 100644
--- a/pcs/test/test_lib_commands_sbd.py
+++ b/pcs/test/test_lib_commands_sbd.py
@@ -98,6 +98,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_UNKNOWN"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     report_codes.FORCE_OPTIONS
                 ),
@@ -108,6 +109,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["another_unknown_option"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     report_codes.FORCE_OPTIONS
                 )
@@ -133,6 +135,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_UNKNOWN"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     None
                 ),
@@ -143,6 +146,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["another_unknown_option"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     None
                 )
@@ -170,6 +174,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_WATCHDOG_DEV"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     None
                 ),
@@ -180,6 +185,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_OPTS"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     None
                 ),
@@ -190,6 +196,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_DEVICE"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     None
                 )
@@ -216,6 +223,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_WATCHDOG_DEV"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     None
                 ),
@@ -226,6 +234,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_OPTS"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     None
                 ),
@@ -236,6 +245,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_UNKNOWN"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     report_codes.FORCE_OPTIONS
                 )
@@ -263,6 +273,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_WATCHDOG_DEV"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     None
                 ),
@@ -273,6 +284,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_OPTS"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     None
                 ),
@@ -283,6 +295,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_UNKNOWN"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     None
                 ),
@@ -293,6 +306,7 @@ class ValidateSbdOptionsTest(TestCase):
                         "option_names": ["SBD_PACEMAKER"],
                         "option_type": None,
                         "allowed": self.allowed_sbd_options,
+                        "allowed_patterns": [],
                     },
                     None
                 )
@@ -716,6 +730,7 @@ class InitializeBlockDevicesTest(CommonTest):
                     "option_names": sorted(["another_one", "unknown_option"]),
                     "option_type": "option",
                     "allowed": sorted(allowed_options),
+                    "allowed_patterns": [],
                 }
             ),
             *[
diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py
index edc538d..f2b4c62 100644
--- a/pcs/test/test_lib_corosync_config_facade.py
+++ b/pcs/test/test_lib_corosync_config_facade.py
@@ -4,16 +4,18 @@ from __future__ import (
     print_function,
 )
 
-from pcs.test.tools.pcs_unittest import TestCase
 import re
+from textwrap import dedent
 
+from pcs.test.tools import fixture
 from pcs.test.tools.assertions import (
     ac,
     assert_raise_library_error,
     assert_report_item_list_equal,
 )
 from pcs.test.tools.custom_mock import MockLibraryReportProcessor
-from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.misc import get_test_resource as rc, outdent
+from pcs.test.tools.pcs_unittest import TestCase
 
 from pcs.common import report_codes
 from pcs.lib.errors import ReportItemSeverity as severity
@@ -587,6 +589,7 @@ quorum {
                         "last_man_standing_window",
                         "wait_for_all"
                     ],
+                    "allowed_patterns": [],
                 }
             ),
             (
@@ -601,6 +604,7 @@ quorum {
                         "last_man_standing_window",
                         "wait_for_all"
                     ],
+                    "allowed_patterns": [],
                 }
             )
         )
@@ -722,7 +726,7 @@ class GetQuorumDeviceSettingsTest(TestCase):
         config = ""
         facade = lib.ConfigFacade.from_string(config)
         self.assertEqual(
-            (None, {}, {}),
+            (None, {}, {}, {}),
             facade.get_quorum_device_settings()
         )
         self.assertFalse(facade.need_stopped_cluster)
@@ -732,93 +736,138 @@ class GetQuorumDeviceSettingsTest(TestCase):
         config = open(rc("corosync.conf")).read()
         facade = lib.ConfigFacade.from_string(config)
         self.assertEqual(
-            (None, {}, {}),
+            (None, {}, {}, {}),
             facade.get_quorum_device_settings()
         )
         self.assertFalse(facade.need_stopped_cluster)
         self.assertFalse(facade.need_qdevice_reload)
 
     def test_empty_device(self):
-        config = """\
-quorum {
-    device {
-    }
-}
-"""
+        config = dedent("""\
+            quorum {
+                device {
+                }
+            }
+            """
+        )
         facade = lib.ConfigFacade.from_string(config)
         self.assertEqual(
-            (None, {}, {}),
+            (None, {}, {}, {}),
             facade.get_quorum_device_settings()
         )
         self.assertFalse(facade.need_stopped_cluster)
         self.assertFalse(facade.need_qdevice_reload)
 
     def test_no_model(self):
-        config = """\
-quorum {
-    device {
-        option: value
-        net {
-            host: 127.0.0.1
-        }
-    }
-}
-"""
+        config = dedent("""\
+            quorum {
+                device {
+                    option: value
+                    net {
+                        host: 127.0.0.1
+                    }
+                }
+            }
+            """
+        )
         facade = lib.ConfigFacade.from_string(config)
         self.assertEqual(
-            (None, {}, {"option": "value"}),
+            (None, {}, {"option": "value"}, {}),
             facade.get_quorum_device_settings()
         )
         self.assertFalse(facade.need_stopped_cluster)
         self.assertFalse(facade.need_qdevice_reload)
 
     def test_configured_properly(self):
-        config = """\
-quorum {
-    device {
-        option: value
-        model: net
-        net {
-            host: 127.0.0.1
-        }
-    }
-}
-"""
+        config = dedent("""\
+            quorum {
+                device {
+                    option: value
+                    model: net
+                    net {
+                        host: 127.0.0.1
+                    }
+                }
+            }
+            """
+        )
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(
+            ("net", {"host": "127.0.0.1"}, {"option": "value"}, {}),
+            facade.get_quorum_device_settings()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertFalse(facade.need_qdevice_reload)
+
+    def test_configured_properly_heuristics(self):
+        config = dedent("""\
+            quorum {
+                device {
+                    option: value
+                    model: net
+                    net {
+                        host: 127.0.0.1
+                    }
+                    heuristics {
+                        mode: on
+                        exec_ls: test -f /tmp/test
+                    }
+                }
+            }
+            """
+        )
         facade = lib.ConfigFacade.from_string(config)
         self.assertEqual(
-            ("net", {"host": "127.0.0.1"}, {"option": "value"}),
+            (
+                "net",
+                {"host": "127.0.0.1"},
+                {"option": "value"},
+                {"exec_ls": "test -f /tmp/test", "mode": "on"}
+            ),
             facade.get_quorum_device_settings()
         )
         self.assertFalse(facade.need_stopped_cluster)
         self.assertFalse(facade.need_qdevice_reload)
 
     def test_more_devices_one_quorum(self):
-        config = """\
-quorum {
-    device {
-        option0: valueX
-        option1: value1
-        model: disk
-        net {
-            host: 127.0.0.1
-        }
-    }
-    device {
-        option0: valueY
-        option2: value2
-        model: net
-        disk {
-            path: /dev/quorum_disk
-        }
-    }
-}
-"""
+        config = dedent("""\
+            quorum {
+                device {
+                    option0: valueX
+                    option1: value1
+                    model: disk
+                    net {
+                        host: 127.0.0.1
+                    }
+                    heuristics {
+                        mode: sync
+                        exec_ls: test -f /tmp/test
+                    }
+                }
+                device {
+                    option0: valueY
+                    option2: value2
+                    model: net
+                    disk {
+                        path: /dev/quorum_disk
+                    }
+                    heuristics {
+                        mode: on
+                    }
+                    heuristics {
+                        timeout: 5
+                    }
+                }
+            }
+            """
+        )
         facade = lib.ConfigFacade.from_string(config)
         self.assertEqual(
             (
                 "net",
                 {"host": "127.0.0.1"},
-                {"option0": "valueY", "option1": "value1", "option2": "value2"}
+                {"option0": "valueY", "option1": "value1", "option2": "value2"},
+                {"exec_ls": "test -f /tmp/test", "mode": "on", "timeout": "5"}
             ),
             facade.get_quorum_device_settings()
         )
@@ -826,34 +875,46 @@ quorum {
         self.assertFalse(facade.need_qdevice_reload)
 
     def test_more_devices_more_quorum(self):
-        config = """\
-quorum {
-    device {
-        option0: valueX
-        option1: value1
-        model: disk
-        net {
-            host: 127.0.0.1
-        }
-    }
-}
-quorum {
-    device {
-        option0: valueY
-        option2: value2
-        model: net
-        disk {
-            path: /dev/quorum_disk
-        }
-    }
-}
-"""
+        config = dedent("""\
+            quorum {
+                device {
+                    option0: valueX
+                    option1: value1
+                    model: disk
+                    net {
+                        host: 127.0.0.1
+                    }
+                    heuristics {
+                        mode: sync
+                        exec_ls: test -f /tmp/test
+                    }
+                }
+            }
+            quorum {
+                device {
+                    option0: valueY
+                    option2: value2
+                    model: net
+                    disk {
+                        path: /dev/quorum_disk
+                    }
+                    heuristics {
+                        mode: on
+                    }
+                    heuristics {
+                        timeout: 5
+                    }
+                }
+            }
+            """
+        )
         facade = lib.ConfigFacade.from_string(config)
         self.assertEqual(
             (
                 "net",
                 {"host": "127.0.0.1"},
-                {"option0": "valueY", "option1": "value1", "option2": "value2"}
+                {"option0": "valueY", "option1": "value1", "option2": "value2"},
+                {"exec_ls": "test -f /tmp/test", "mode": "on", "timeout": "5"}
             ),
             facade.get_quorum_device_settings()
         )
@@ -862,26 +923,75 @@ quorum {
 
 
 class AddQuorumDeviceTest(TestCase):
+    def heuristic_no_exec_warning(self, mode, warn):
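+        # Add a quorum device with the given heuristics mode and check that
+        # COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC is reported only
+        # when warn is True.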
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.add_quorum_device(
+            reporter,
+            "net",
+            {"host": "127.0.0.1", "algorithm": "ffsplit"},
+            {},
+            {"mode": mode}
+        )
+        ac(
+            config.replace(
+                "    provider: corosync_votequorum\n",
+                outdent("""\
+                    provider: corosync_votequorum
+
+                    device {
+                        model: net
+                        votes: 1
+
+                        net {
+                            algorithm: ffsplit
+                            host: 127.0.0.1
+                        }
+
+                        heuristics {
+                            mode: *mode*
+                        }
+                    }
+                """.replace("*mode*", mode))
+            ),
+            facade.config.export()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertFalse(facade.need_qdevice_reload)
+        expected_reports = []
+        if warn:
+            expected_reports.append(
+                fixture.warn(
+                    report_codes.COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC
+                )
+            )
+        assert_report_item_list_equal(
+            reporter.report_item_list,
+            expected_reports
+        )
+
     def test_already_exists(self):
-        config = """\
-totem {
-    version: 2
-}
+        config = dedent("""\
+            totem {
+                version: 2
+            }
 
-quorum {
-    provider: corosync_votequorum
+            quorum {
+                provider: corosync_votequorum
 
-    device {
-        option: value
-        model: net
+                device {
+                    option: value
+                    model: net
+
+                    net {
+                        host: 127.0.0.1
+                        algorithm: ffsplit
+                    }
+                }
+            }
+            """)
 
-        net {
-            host: 127.0.0.1
-            algorithm: ffsplit
-        }
-    }
-}
-"""
         reporter = MockLibraryReportProcessor()
         facade = lib.ConfigFacade.from_string(config)
         assert_raise_library_error(
@@ -889,6 +999,7 @@ quorum {
                 reporter,
                 "net",
                 {"host": "127.0.0.1", "algorithm": "ffsplit"},
+                {},
                 {}
             ),
             (
@@ -909,6 +1020,7 @@ quorum {
             reporter,
             "net",
             {"host": "127.0.0.1", "algorithm": "ffsplit"},
+            {},
             {}
         )
         ac(
@@ -941,6 +1053,7 @@ quorum {
             reporter,
             "net",
             {"host": "127.0.0.1", "algorithm": "lms"},
+            {},
             {}
         )
         ac(
@@ -973,6 +1086,7 @@ quorum {
             reporter,
             "net",
             {"host": "127.0.0.1", "algorithm": "lms"},
+            {},
             {}
         )
         ac(
@@ -1014,29 +1128,47 @@ quorum {
             {
                 "timeout": "23456",
                 "sync_timeout": "34567"
+            },
+            {
+                "mode": "on",
+                "timeout": "5",
+                "sync_timeout": "15",
+                "interval": "30",
+                "exec_ping": 'ping -q -c 1 "127.0.0.1"',
+                "exec_ls": "test -f /tmp/test",
             }
         )
         ac(
             config.replace(
-                "    provider: corosync_votequorum",
-                """\
-    provider: corosync_votequorum
-
-    device {
-        sync_timeout: 34567
-        timeout: 23456
-        model: net
-        votes: 1
-
-        net {
-            algorithm: ffsplit
-            connect_timeout: 12345
-            force_ip_version: 4
-            host: 127.0.0.1
-            port: 4433
-            tie_breaker: lowest
-        }
-    }"""
+                "    provider: corosync_votequorum\n",
+                outdent("""\
+                    provider: corosync_votequorum
+
+                    device {
+                        sync_timeout: 34567
+                        timeout: 23456
+                        model: net
+                        votes: 1
+
+                        net {
+                            algorithm: ffsplit
+                            connect_timeout: 12345
+                            force_ip_version: 4
+                            host: 127.0.0.1
+                            port: 4433
+                            tie_breaker: lowest
+                        }
+
+                        heuristics {
+                            exec_ls: test -f /tmp/test
+                            exec_ping: ping -q -c 1 "127.0.0.1"
+                            interval: 30
+                            mode: on
+                            sync_timeout: 15
+                            timeout: 5
+                        }
+                    }
+                """)
             ),
             facade.config.export()
         )
@@ -1044,6 +1176,15 @@ quorum {
         self.assertFalse(facade.need_qdevice_reload)
         self.assertEqual([], reporter.report_item_list)
 
+    def test_success_heuristics_on_no_exec(self):
+        self.heuristic_no_exec_warning("on", True)
+
+    def test_success_heuristics_sync_no_exec(self):
+        self.heuristic_no_exec_warning("sync", True)
+
+    def test_success_heuristics_off_no_exec(self):
+        self.heuristic_no_exec_warning("off", False)
+
     def test_remove_conflicting_options(self):
         config = open(rc("corosync.conf")).read()
         config = config.replace(
@@ -1063,6 +1204,7 @@ quorum {
             reporter,
             "net",
             {"host": "127.0.0.1", "algorithm": "ffsplit"},
+            {},
             {}
         )
         ac(
@@ -1091,48 +1233,57 @@ quorum {
         self.assertEqual([], reporter.report_item_list)
 
     def test_remove_old_configuration(self):
-        config = """\
-quorum {
-    provider: corosync_votequorum
-    device {
-        option: value_old1
-    }
-}
-quorum {
-    provider: corosync_votequorum
-    device {
-        option: value_old2
-    }
-}
-        """
+        config = dedent("""\
+            quorum {
+                provider: corosync_votequorum
+                device {
+                    option: value_old1
+                    heuristics {
+                        h_option: hvalue_old1
+                    }
+                }
+            }
+            quorum {
+                provider: corosync_votequorum
+                device {
+                    option: value_old2
+                    heuristics {
+                        h_option: hvalue_old2
+                    }
+                }
+            }
+            """
+        )
         reporter = MockLibraryReportProcessor()
         facade = lib.ConfigFacade.from_string(config)
         facade.add_quorum_device(
             reporter,
             "net",
             {"host": "127.0.0.1", "algorithm": "ffsplit"},
+            {},
             {}
         )
         ac(
-            """\
-quorum {
-    provider: corosync_votequorum
-}
+            dedent("""\
+                quorum {
+                    provider: corosync_votequorum
+                }
 
-quorum {
-    provider: corosync_votequorum
+                quorum {
+                    provider: corosync_votequorum
 
-    device {
-        model: net
-        votes: 1
+                    device {
+                        model: net
+                        votes: 1
 
-        net {
-            algorithm: ffsplit
-            host: 127.0.0.1
-        }
-    }
-}
-"""
+                        net {
+                            algorithm: ffsplit
+                            host: 127.0.0.1
+                        }
+                    }
+                }
+                """
+            )
             ,
             facade.config.export()
         )
@@ -1145,7 +1296,7 @@ quorum {
         reporter = MockLibraryReportProcessor()
         facade = lib.ConfigFacade.from_string(config)
         assert_raise_library_error(
-            lambda: facade.add_quorum_device(reporter, "invalid", {}, {}),
+            lambda: facade.add_quorum_device(reporter, "invalid", {}, {}, {}),
             (
                 severity.ERROR,
                 report_codes.INVALID_OPTION_VALUE,
@@ -1165,7 +1316,9 @@ quorum {
         config = open(rc("corosync-3nodes.conf")).read()
         reporter = MockLibraryReportProcessor()
         facade = lib.ConfigFacade.from_string(config)
-        facade.add_quorum_device(reporter, "invalid", {}, {}, force_model=True)
+        facade.add_quorum_device(
+            reporter, "invalid", {}, {}, {}, force_model=True
+        )
         ac(
             config.replace(
                 "    provider: corosync_votequorum",
@@ -1200,7 +1353,7 @@ quorum {
         reporter = MockLibraryReportProcessor()
         facade = lib.ConfigFacade.from_string(config)
         assert_raise_library_error(
-            lambda: facade.add_quorum_device(reporter, "net", {}, {}),
+            lambda: facade.add_quorum_device(reporter, "net", {}, {}, {}),
             (
                 severity.ERROR,
                 report_codes.REQUIRED_OPTION_IS_MISSING,
@@ -1234,6 +1387,19 @@ quorum {
                     "sync_timeout": "-3",
                     "bad_generic_option": "bad generic value",
                     "model": "some model",
+                },
+                {
+                    "mode": "bad mode",
+                    "timeout": "-5",
+                    "sync_timeout": "-15",
+                    "interval": "-30",
+                    "exec_ping": "",
+                    "exec_ls.bad": "test -f /tmp/test",
+                    "exec_ls:bad": "test -f /tmp/test",
+                    "exec_ls bad": "test -f /tmp/test",
+                    "exec_ls{bad": "test -f /tmp/test",
+                    "exec_ls}bad": "test -f /tmp/test",
+                    "exec_ls#bad": "test -f /tmp/test",
                 }
             ),
             (
@@ -1260,6 +1426,7 @@ quorum {
                         "port",
                         "tie_breaker",
                     ],
+                    "allowed_patterns": [],
                 },
                 report_codes.FORCE_OPTIONS
             ),
@@ -1315,6 +1482,7 @@ quorum {
                     "option_names": ["bad_generic_option"],
                     "option_type": "quorum device",
                     "allowed": ["sync_timeout", "timeout"],
+                    "allowed_patterns": [],
                 },
                 report_codes.FORCE_OPTIONS
             ),
@@ -1325,6 +1493,7 @@ quorum {
                     "option_names": ["model"],
                     "option_type": "quorum device",
                     "allowed": ["sync_timeout", "timeout"],
+                    "allowed_patterns": [],
                 }
             ),
             (
@@ -1346,6 +1515,51 @@ quorum {
                     "allowed_values": "positive integer",
                 },
                 report_codes.FORCE_OPTIONS
+            ),
+            fixture.error(
+                report_codes.INVALID_OPTION_VALUE,
+                force_code=report_codes.FORCE_OPTIONS,
+                option_name="mode",
+                option_value="bad mode",
+                allowed_values=("off", "on", "sync")
+            ),
+            fixture.error(
+                report_codes.INVALID_OPTION_VALUE,
+                force_code=report_codes.FORCE_OPTIONS,
+                option_name="interval",
+                option_value="-30",
+                allowed_values="a positive integer"
+            ),
+            fixture.error(
+                report_codes.INVALID_OPTION_VALUE,
+                force_code=report_codes.FORCE_OPTIONS,
+                option_name="sync_timeout",
+                option_value="-15",
+                allowed_values="a positive integer"
+            ),
+            fixture.error(
+                report_codes.INVALID_OPTION_VALUE,
+                force_code=report_codes.FORCE_OPTIONS,
+                option_name="timeout",
+                option_value="-5",
+                allowed_values="a positive integer"
+            ),
+            fixture.error(
+                report_codes.INVALID_OPTION_VALUE,
+                option_name="exec_ping",
+                option_value="",
+                allowed_values="a command to be run"
+            ),
+            fixture.error(
+                report_codes.INVALID_USERDEFINED_OPTIONS,
+                option_names=[
+                    "exec_ls bad", "exec_ls#bad", "exec_ls.bad", "exec_ls:bad",
+                    "exec_ls{bad", "exec_ls}bad",
+                ],
+                option_type="heuristics",
+                allowed_description=(
+                    "exec_NAME cannot contain '.:{}#' and whitespace characters"
+                )
             )
         )
         self.assertFalse(facade.need_stopped_cluster)
@@ -1358,7 +1572,7 @@ quorum {
         facade = lib.ConfigFacade.from_string(config)
         assert_raise_library_error(
             lambda: facade.add_quorum_device(
-                reporter, "net", {}, {},
+                reporter, "net", {}, {}, {},
                 force_model=True, force_options=True
             ),
             (
@@ -1378,7 +1592,7 @@ quorum {
         facade = lib.ConfigFacade.from_string(config)
         assert_raise_library_error(
             lambda: facade.add_quorum_device(
-                reporter, "net", {"host": "", "algorithm": ""}, {},
+                reporter, "net", {"host": "", "algorithm": ""}, {}, {},
                 force_model=True, force_options=True
             ),
             (
@@ -1392,6 +1606,46 @@ quorum {
         self.assertFalse(facade.need_qdevice_reload)
         ac(config, facade.config.export())
 
+    def test_cannot_force_bad_heuristics_exec_name(self):
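+        # Invalid characters in exec_NAME option names cannot be forced; the
+        # whole call fails and the config stays untouched.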
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.add_quorum_device(
+                reporter,
+                "net",
+                {
+                    "host": "qnetd-host",
+                    "algorithm": "ffsplit",
+                },
+                {},
+                {
+                    "mode": "on",
+                    "exec_ls.bad": "test -f /tmp/test",
+                    "exec_ls:bad": "test -f /tmp/test",
+                    "exec_ls bad": "test -f /tmp/test",
+                    "exec_ls{bad": "test -f /tmp/test",
+                    "exec_ls}bad": "test -f /tmp/test",
+                    "exec_ls#bad": "test -f /tmp/test",
+                },
+                force_options=True
+            ),
+            fixture.error(
+                report_codes.INVALID_USERDEFINED_OPTIONS,
+                option_names=[
+                    "exec_ls bad", "exec_ls#bad", "exec_ls.bad", "exec_ls:bad",
+                    "exec_ls{bad", "exec_ls}bad",
+                ],
+                option_type="heuristics",
+                allowed_description=(
+                    "exec_NAME cannot contain '.:{}#' and whitespace characters"
+                )
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertFalse(facade.need_qdevice_reload)
+        ac(config, facade.config.export())
+
     def test_bad_options_net_forced(self):
         config = open(rc("corosync-3nodes.conf")).read()
         reporter = MockLibraryReportProcessor()
@@ -1413,32 +1667,48 @@ quorum {
                 "sync_timeout": "-3",
                 "bad_generic_option": "bad generic value",
             },
+            {
+                "mode": "bad mode",
+                "timeout": "-5",
+                "sync_timeout": "-15",
+                "interval": "-30",
+                "exec_ping": 'ping -q -c 1 "127.0.0.1"',
+            },
             force_options=True
         )
         self.assertFalse(facade.need_stopped_cluster)
         self.assertFalse(facade.need_qdevice_reload)
         ac(
             config.replace(
-                "    provider: corosync_votequorum",
-                """\
-    provider: corosync_votequorum
-
-    device {
-        bad_generic_option: bad generic value
-        sync_timeout: -3
-        timeout: -2
-        model: net
-
-        net {
-            algorithm: bad algorithm
-            bad_model_option: bad model value
-            connect_timeout: -1
-            force_ip_version: 3
-            host: 127.0.0.1
-            port: 65537
-            tie_breaker: 125
-        }
-    }"""
+                "    provider: corosync_votequorum\n",
+                outdent("""\
+                    provider: corosync_votequorum
+
+                    device {
+                        bad_generic_option: bad generic value
+                        sync_timeout: -3
+                        timeout: -2
+                        model: net
+
+                        net {
+                            algorithm: bad algorithm
+                            bad_model_option: bad model value
+                            connect_timeout: -1
+                            force_ip_version: 3
+                            host: 127.0.0.1
+                            port: 65537
+                            tie_breaker: 125
+                        }
+
+                        heuristics {
+                            exec_ping: ping -q -c 1 "127.0.0.1"
+                            interval: -30
+                            mode: bad mode
+                            sync_timeout: -15
+                            timeout: -5
+                        }
+                    }
+                """)
             ),
             facade.config.export()
         )
@@ -1468,6 +1738,7 @@ quorum {
                             "port",
                             "tie_breaker",
                         ],
+                        "allowed_patterns": [],
                     }
                 ),
                 (
@@ -1513,6 +1784,7 @@ quorum {
                         "option_names": ["bad_generic_option"],
                         "option_type": "quorum device",
                         "allowed": ["sync_timeout", "timeout"],
+                        "allowed_patterns": [],
                     }
                 ),
                 (
@@ -1532,7 +1804,31 @@ quorum {
                         "option_value": "-2",
                         "allowed_values": "positive integer",
                     }
-                )
+                ),
+                fixture.warn(
+                    report_codes.INVALID_OPTION_VALUE,
+                    option_name="mode",
+                    option_value="bad mode",
+                    allowed_values=("off", "on", "sync")
+                ),
+                fixture.warn(
+                    report_codes.INVALID_OPTION_VALUE,
+                    option_name="interval",
+                    option_value="-30",
+                    allowed_values="a positive integer"
+                ),
+                fixture.warn(
+                    report_codes.INVALID_OPTION_VALUE,
+                    option_name="sync_timeout",
+                    option_value="-15",
+                    allowed_values="a positive integer"
+                ),
+                fixture.warn(
+                    report_codes.INVALID_OPTION_VALUE,
+                    option_name="timeout",
+                    option_value="-5",
+                    allowed_values="a positive integer"
+                ),
             ]
         )
 
@@ -1545,6 +1841,7 @@ quorum {
                 reporter,
                 "net",
                 {"host": "127.0.0.1", "algorithm": "test"},
+                {},
                 {}
             ),
             (
@@ -1565,6 +1862,7 @@ quorum {
                 reporter,
                 "net",
                 {"host": "127.0.0.1", "algorithm": "2nodelms"},
+                {},
                 {}
             ),
             (
@@ -1584,20 +1882,54 @@ class UpdateQuorumDeviceTest(TestCase):
     def fixture_add_device(self, config, votes=None):
         with_device = re.sub(
             re.compile(r"quorum {[^}]*}", re.MULTILINE | re.DOTALL),
-            """\
-quorum {
-    provider: corosync_votequorum
-
-    device {
-        timeout: 12345
-        model: net
+            dedent("""\
+                quorum {
+                    provider: corosync_votequorum
+
+                    device {
+                        timeout: 12345
+                        model: net
+
+                        net {
+                            host: 127.0.0.1
+                            port: 4433
+                        }
+                    }
+                }"""
+            ),
+            config
+        )
+        if votes:
+            with_device = with_device.replace(
+                "model: net",
+                "model: net\n        votes: {0}".format(votes)
+            )
+        return with_device
 
-        net {
-            host: 127.0.0.1
-            port: 4433
-        }
-    }
-}""",
+    def fixture_add_device_with_heuristics(self, config, votes=None):
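+        # Like fixture_add_device, but the injected device section also
+        # carries a heuristics block (exec_ls, interval, mode).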
+        with_device = re.sub(
+            re.compile(r"quorum {[^}]*}", re.MULTILINE | re.DOTALL),
+            dedent("""\
+                quorum {
+                    provider: corosync_votequorum
+
+                    device {
+                        timeout: 12345
+                        model: net
+
+                        net {
+                            host: 127.0.0.1
+                            port: 4433
+                        }
+
+                        heuristics {
+                            exec_ls: test -f /tmp/test
+                            interval: 30
+                            mode: on
+                        }
+                    }
+                }"""
+            ),
             config
         )
         if votes:
@@ -1607,6 +1939,44 @@ quorum {
             )
         return with_device
 
+    def heuristic_no_exec_warning(
+        self, config, heuristics_options, expected_heuristics, warn
+    ):
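+        # Update only the heuristics options of an existing qdevice config
+        # and check that COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC is
+        # reported only when warn is True; the rewritten heuristics section
+        # must match expected_heuristics.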
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(reporter, {}, {}, heuristics_options)
+
+        expected_config = re.sub(
+            re.compile(r"\s*heuristics {[^}]*}", re.MULTILINE | re.DOTALL),
+            "",
+            config
+        )
+        expected_config = expected_config.replace(
+            "            port: 4433\n        }\n",
+            outdent("""\
+                        port: 4433
+                    }
+
+            *heuristics*
+            """)
+            .replace("*heuristics*\n", expected_heuristics)
+        )
+
+        ac(expected_config, facade.config.export())
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertTrue(facade.need_qdevice_reload)
+        expected_reports = []
+        if warn:
+            expected_reports.append(
+                fixture.warn(
+                    report_codes.COROSYNC_QUORUM_HEURISTICS_ENABLED_WITH_NO_EXEC
+                )
+            )
+        assert_report_item_list_equal(
+            reporter.report_item_list,
+            expected_reports
+        )
+
     def test_not_existing(self):
         config = open(rc("corosync.conf")).read()
         reporter = MockLibraryReportProcessor()
@@ -1615,6 +1985,7 @@ quorum {
             lambda: facade.update_quorum_device(
                 reporter,
                 {"host": "127.0.0.1"},
+                {},
                 {}
             ),
             (
@@ -1627,6 +1998,23 @@ quorum {
         self.assertFalse(facade.need_qdevice_reload)
         ac(config, facade.config.export())
 
+    def test_not_existing_add_heuristics(self):
+        config = open(rc("corosync.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.update_quorum_device(
+                reporter,
+                {},
+                {},
+                {"mode": "on"}
+            ),
+            fixture.error(report_codes.QDEVICE_NOT_DEFINED)
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertFalse(facade.need_qdevice_reload)
+        ac(config, facade.config.export())
+
     def test_success_model_options_net(self):
         config = self.fixture_add_device(
             open(rc("corosync-3nodes.conf")).read(),
@@ -1637,6 +2025,7 @@ quorum {
         facade.update_quorum_device(
             reporter,
             {"host": "127.0.0.2", "port": "", "algorithm": "ffsplit"},
+            {},
             {}
         )
         self.assertFalse(facade.need_stopped_cluster)
@@ -1656,7 +2045,7 @@ quorum {
         )
         reporter = MockLibraryReportProcessor()
         facade = lib.ConfigFacade.from_string(config)
-        facade.update_quorum_device(reporter, {"port": "4444"}, {})
+        facade.update_quorum_device(reporter, {"port": "4444"}, {}, {})
         self.assertFalse(facade.need_stopped_cluster)
         self.assertTrue(facade.need_qdevice_reload)
         ac(
@@ -1678,6 +2067,7 @@ quorum {
             lambda: facade.update_quorum_device(
                 reporter,
                 {"host": "", "algorithm": ""},
+                {},
                 {}
             ),
             (
@@ -1712,6 +2102,7 @@ quorum {
                 reporter,
                 {"host": "", "algorithm": ""},
                 {},
+                {},
                 force_options=True
             ),
             (
@@ -1742,6 +2133,7 @@ quorum {
                     "tie_breaker": "125",
                     "bad_model_option": "bad model value",
                 },
+                {},
                 {}
             ),
             (
@@ -1768,6 +2160,7 @@ quorum {
                         "port",
                         "tie_breaker",
                     ],
+                    "allowed_patterns": [],
                 },
                 report_codes.FORCE_OPTIONS
             ),
@@ -1833,6 +2226,7 @@ quorum {
                 "bad_model_option": "bad model value",
             },
             {},
+            {},
             force_options=True
         )
         self.assertFalse(facade.need_stopped_cluster)
@@ -1877,6 +2271,7 @@ quorum {
                             "port",
                             "tie_breaker",
                         ],
+                        "allowed_patterns": [],
                     },
                 ),
                 (
@@ -1927,7 +2322,8 @@ quorum {
         facade.update_quorum_device(
             reporter,
             {},
-            {"timeout": "", "sync_timeout": "23456"}
+            {"timeout": "", "sync_timeout": "23456"},
+            {}
         )
         self.assertFalse(facade.need_stopped_cluster)
         self.assertTrue(facade.need_qdevice_reload)
@@ -1940,8 +2336,8 @@ quorum {
         )
         self.assertEqual([], reporter.report_item_list)
 
-    def test_success_both_options(self):
-        config = self.fixture_add_device(
+    def test_success_all_options(self):
+        config = self.fixture_add_device_with_heuristics(
             open(rc("corosync-3nodes.conf")).read()
         )
         reporter = MockLibraryReportProcessor()
@@ -1949,7 +2345,8 @@ quorum {
         facade.update_quorum_device(
             reporter,
             {"port": "4444"},
-            {"timeout": "23456"}
+            {"timeout": "23456"},
+            {"interval": "35"}
         )
         self.assertFalse(facade.need_stopped_cluster)
         self.assertTrue(facade.need_qdevice_reload)
@@ -1957,6 +2354,7 @@ quorum {
             config
                 .replace("port: 4433", "port: 4444")
                 .replace("timeout: 12345", "timeout: 23456")
+                .replace("interval: 30", "interval: 35")
             ,
             facade.config.export()
         )
@@ -1977,7 +2375,8 @@ quorum {
                     "sync_timeout": "-3",
                     "bad_generic_option": "bad generic value",
                     "model": "some model",
-                }
+                },
+                {}
             ),
             (
                 severity.ERROR,
@@ -1986,6 +2385,7 @@ quorum {
                     "option_names": ["bad_generic_option"],
                     "option_type": "quorum device",
                     "allowed": ["sync_timeout", "timeout"],
+                    "allowed_patterns": [],
                 },
                 report_codes.FORCE_OPTIONS
             ),
@@ -1996,6 +2396,7 @@ quorum {
                     "option_names": ["model"],
                     "option_type": "quorum device",
                     "allowed": ["sync_timeout", "timeout"],
+                    "allowed_patterns": [],
                 }
             ),
             (
@@ -2034,6 +2435,7 @@ quorum {
                 reporter,
                 {},
                 {"model": "some model", },
+                {},
                 force_options=True
             ),
             (
@@ -2043,6 +2445,7 @@ quorum {
                     "option_names": ["model"],
                     "option_type": "quorum device",
                     "allowed": ["sync_timeout", "timeout"],
+                    "allowed_patterns": [],
                 }
             )
         )
@@ -2064,6 +2467,7 @@ quorum {
                 "sync_timeout": "-3",
                 "bad_generic_option": "bad generic value",
             },
+            {},
             force_options=True
         )
         self.assertFalse(facade.need_stopped_cluster)
@@ -2089,6 +2493,7 @@ quorum {
                         "option_names": ["bad_generic_option"],
                         "option_type": "quorum device",
                         "allowed": ["sync_timeout", "timeout"],
+                        "allowed_patterns": [],
                     },
                 ),
                 (
@@ -2112,6 +2517,315 @@ quorum {
             ]
         )
 
+    def test_success_add_heuristics(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(
+            reporter,
+            {},
+            {},
+            {"mode": "on", "exec_ls": "test -f /tmp/test", "interval": "30"}
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertTrue(facade.need_qdevice_reload)
+        ac(
+            self.fixture_add_device_with_heuristics(
+                open(rc("corosync-3nodes.conf")).read()
+            ),
+            facade.config.export()
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_success_heuristics_add_on_no_exec(self):
+        self.heuristic_no_exec_warning(
+            self.fixture_add_device(open(rc("corosync.conf")).read()),
+            {"mode": "on"},
+            outdent("""\
+                    heuristics {
+                        mode: on
+                    }
+            """),
+            True
+        )
+
+    def test_success_heuristics_add_sync_no_exec(self):
+        self.heuristic_no_exec_warning(
+            self.fixture_add_device(open(rc("corosync.conf")).read()),
+            {"mode": "sync"},
+            outdent("""\
+                    heuristics {
+                        mode: sync
+                    }
+            """),
+            True
+        )
+
+    def test_success_heuristics_add_off_no_exec(self):
+        self.heuristic_no_exec_warning(
+            self.fixture_add_device(open(rc("corosync.conf")).read()),
+            {"mode": "off"},
+            outdent("""\
+                    heuristics {
+                        mode: off
+                    }
+            """),
+            False
+        )
+
+    def test_success_heuristics_update_on_no_exec(self):
+        self.heuristic_no_exec_warning(
+            self.fixture_add_device_with_heuristics(
+                open(rc("corosync.conf")).read()
+            ),
+            {"mode": "on", "exec_ls": ""},
+            outdent("""\
+                    heuristics {
+                        interval: 30
+                        mode: on
+                    }
+            """),
+            True
+        )
+
+    def test_success_heuristics_update_sync_no_exec(self):
+        self.heuristic_no_exec_warning(
+            self.fixture_add_device_with_heuristics(
+                open(rc("corosync.conf")).read()
+            ),
+            {"mode": "sync", "exec_ls": ""},
+            outdent("""\
+                    heuristics {
+                        interval: 30
+                        mode: sync
+                    }
+            """),
+            True
+        )
+
+    def test_success_heuristics_update_off_no_exec(self):
+        self.heuristic_no_exec_warning(
+            self.fixture_add_device_with_heuristics(
+                open(rc("corosync.conf")).read()
+            ),
+            {"mode": "off", "exec_ls": ""},
+            outdent("""\
+                    heuristics {
+                        interval: 30
+                        mode: off
+                    }
+            """),
+            False
+        )
+
+    def test_success_heuristics_update_exec_present(self):
+        self.heuristic_no_exec_warning(
+            self.fixture_add_device_with_heuristics(
+                open(rc("corosync.conf")).read()
+            ),
+            {"exec_ls": "", "exec_ping": "ping example.com"},
+            outdent("""\
+                    heuristics {
+                        interval: 30
+                        mode: on
+                        exec_ping: ping example.com
+                    }
+            """),
+            False
+        )
+
+    def test_success_heuristics_update_exec_kept(self):
+        self.heuristic_no_exec_warning(
+            self.fixture_add_device_with_heuristics(
+                open(rc("corosync.conf")).read()
+            ),
+            {"interval": "25"},
+            outdent("""\
+                    heuristics {
+                        exec_ls: test -f /tmp/test
+                        interval: 25
+                        mode: on
+                    }
+            """),
+            False
+        )
+
+    def test_success_remove_heuristics(self):
+        config = self.fixture_add_device_with_heuristics(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(
+            reporter,
+            {},
+            {},
+            {"mode": "", "exec_ls": "", "interval": ""}
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertTrue(facade.need_qdevice_reload)
+        ac(
+            self.fixture_add_device(
+                open(rc("corosync-3nodes.conf")).read()
+            ),
+            facade.config.export()
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_success_change_heuristics(self):
+        config = self.fixture_add_device_with_heuristics(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(
+            reporter,
+            {},
+            {},
+            {"mode": "sync", "interval": "", "timeout": "20"}
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertTrue(facade.need_qdevice_reload)
+        ac(
+            config.replace(
+                "interval: 30\n            mode: on",
+                "mode: sync\n            timeout: 20",
+            ),
+            facade.config.export()
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_heuristics_bad_options(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.update_quorum_device(
+                reporter,
+                {},
+                {},
+                {
+                    "mode": "bad mode",
+                    "timeout": "-5",
+                    "sync_timeout": "-15",
+                    "interval": "-30",
+                    "exec_ls.bad": "test -f /tmp/test",
+                    "exec_ls:bad": "test -f /tmp/test",
+                    "exec_ls bad": "test -f /tmp/test",
+                    "exec_ls{bad": "test -f /tmp/test",
+                    "exec_ls}bad": "test -f /tmp/test",
+                    "exec_ls#bad": "test -f /tmp/test",
+                }
+            ),
+            fixture.error(
+                report_codes.INVALID_OPTION_VALUE,
+                force_code=report_codes.FORCE_OPTIONS,
+                option_name="mode",
+                option_value="bad mode",
+                allowed_values=("off", "on", "sync")
+            ),
+            fixture.error(
+                report_codes.INVALID_OPTION_VALUE,
+                force_code=report_codes.FORCE_OPTIONS,
+                option_name="interval",
+                option_value="-30",
+                allowed_values="a positive integer"
+            ),
+            fixture.error(
+                report_codes.INVALID_OPTION_VALUE,
+                force_code=report_codes.FORCE_OPTIONS,
+                option_name="sync_timeout",
+                option_value="-15",
+                allowed_values="a positive integer"
+            ),
+            fixture.error(
+                report_codes.INVALID_OPTION_VALUE,
+                force_code=report_codes.FORCE_OPTIONS,
+                option_name="timeout",
+                option_value="-5",
+                allowed_values="a positive integer"
+            ),
+            fixture.error(
+                report_codes.INVALID_USERDEFINED_OPTIONS,
+                option_names=[
+                    "exec_ls bad", "exec_ls#bad", "exec_ls.bad", "exec_ls:bad",
+                    "exec_ls{bad", "exec_ls}bad",
+                ],
+                option_type="heuristics",
+                allowed_description=(
+                    "exec_NAME cannot contain '.:{}#' and whitespace characters"
+                )
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertFalse(facade.need_qdevice_reload)
+        ac(config, facade.config.export())
+
+    def test_heuristics_bad_options_forced(self):
+        config = self.fixture_add_device_with_heuristics(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(
+            reporter,
+            {},
+            {},
+            {
+                "interval": "-30",
+                "mode": "bad mode",
+                "sync_timeout": "-15",
+                "timeout": "-5",
+            },
+            force_options=True
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertTrue(facade.need_qdevice_reload)
+        ac(
+            config.replace(
+                "interval: 30\n            mode: on",
+                (
+                    "interval: -30\n            mode: bad mode\n"
+                    "            sync_timeout: -15\n"
+                    "            timeout: -5"
+                ),
+            ),
+            facade.config.export()
+        )
+        assert_report_item_list_equal(
+            reporter.report_item_list,
+            [
+                fixture.warn(
+                    report_codes.INVALID_OPTION_VALUE,
+                    option_name="mode",
+                    option_value="bad mode",
+                    allowed_values=("off", "on", "sync")
+                ),
+                fixture.warn(
+                    report_codes.INVALID_OPTION_VALUE,
+                    option_name="interval",
+                    option_value="-30",
+                    allowed_values="a positive integer"
+                ),
+                fixture.warn(
+                    report_codes.INVALID_OPTION_VALUE,
+                    option_name="sync_timeout",
+                    option_value="-15",
+                    allowed_values="a positive integer"
+                ),
+                fixture.warn(
+                    report_codes.INVALID_OPTION_VALUE,
+                    option_name="timeout",
+                    option_value="-5",
+                    allowed_values="a positive integer"
+                ),
+            ]
+        )
+
 
 class RemoveQuorumDeviceTest(TestCase):
     def test_empty_config(self):
@@ -2215,3 +2929,116 @@ quorum {
             config_no_devices,
             facade.config.export()
         )
+
+
+class RemoveQuorumDeviceHeuristics(TestCase):
+    def test_error_on_empty_config(self):
+        config = ""
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            facade.remove_quorum_device_heuristics,
+            fixture.error(report_codes.QDEVICE_NOT_DEFINED)
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertFalse(facade.need_qdevice_reload)
+
+    def test_error_on_no_device(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            facade.remove_quorum_device_heuristics,
+            fixture.error(report_codes.QDEVICE_NOT_DEFINED)
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertFalse(facade.need_qdevice_reload)
+
+    def test_noop_on_no_heuristics(self):
+        config = open(rc("corosync-3nodes-qdevice.conf")).read()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.remove_quorum_device_heuristics()
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertTrue(facade.need_qdevice_reload)
+        ac(config, facade.config.export())
+
+    def test_remove_all_heuristics(self):
+        config_no_devices = open(rc("corosync-3nodes.conf")).read()
+        config_no_heuristics = re.sub(
+            re.compile(r"quorum {[^}]*}", re.MULTILINE | re.DOTALL),
+            dedent("""\
+                quorum {
+                    provider: corosync_votequorum
+
+                    device {
+                        model: net
+
+                        net {
+                            host: 127.0.0.1
+                        }
+                    }
+
+                    device {
+                        option: value
+                    }
+                }
+
+                quorum {
+                    device {
+                        model: net
+
+                        net {
+                            host: 127.0.0.2
+                        }
+                    }
+                }"""
+            ),
+            config_no_devices
+        )
+        config_heuristics = re.sub(
+            re.compile(r"quorum {[^}]*}", re.MULTILINE | re.DOTALL),
+            dedent("""\
+                quorum {
+                    provider: corosync_votequorum
+
+                    device {
+                        model: net
+
+                        net {
+                            host: 127.0.0.1
+                        }
+
+                        heuristics {
+                            mode: on
+                        }
+                    }
+
+                    device {
+                        option: value
+
+                        heuristics {
+                            interval: 3000
+                        }
+                    }
+                }
+
+                quorum {
+                    device {
+                        model: net
+
+                        net {
+                            host: 127.0.0.2
+                        }
+
+                        heuristics {
+                            exec_ls: test -f /tmp/test
+                        }
+                    }
+                }"""
+            ),
+            config_no_devices
+        )
+
+        facade = lib.ConfigFacade.from_string(config_heuristics)
+        facade.remove_quorum_device_heuristics()
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertTrue(facade.need_qdevice_reload)
+        ac(config_no_heuristics, facade.config.export())
diff --git a/pcs/test/test_lib_corosync_qdevice_net.py b/pcs/test/test_lib_corosync_qdevice_net.py
index f9c3f1a..60b67f6 100644
--- a/pcs/test/test_lib_corosync_qdevice_net.py
+++ b/pcs/test/test_lib_corosync_qdevice_net.py
@@ -450,10 +450,13 @@ class ClientSetupTest(TestCase):
     def test_success(self, mock_destroy):
         self.mock_runner.run.return_value = ("tool output", "", 0)
 
-        lib.client_setup(self.mock_runner, "certificate data".encode("utf-8"))
+        lib.client_setup(
+            self.mock_runner,
+            "qnetd CA certificate".encode("utf-8")
+        )
 
         self.assertEqual(
-            "certificate data".encode("utf-8"),
+            "qnetd CA certificate".encode("utf-8"),
             open(self.ca_file_path, "rb").read()
         )
         self.mock_runner.run.assert_called_once_with([
@@ -468,7 +471,7 @@ class ClientSetupTest(TestCase):
         assert_raise_library_error(
             lambda: lib.client_setup(
                 self.mock_runner,
-                "certificate data".encode("utf-8")
+                "qnetd CA certificate".encode("utf-8")
             ),
             (
                 severity.ERROR,
@@ -481,7 +484,7 @@ class ClientSetupTest(TestCase):
         )
 
         self.assertEqual(
-            "certificate data".encode("utf-8"),
+            "qnetd CA certificate".encode("utf-8"),
             open(self.ca_file_path, "rb").read()
         )
         self.mock_runner.run.assert_called_once_with([
diff --git a/pcs/test/test_quorum.py b/pcs/test/test_quorum.py
index 16ede00..1872214 100644
--- a/pcs/test/test_quorum.py
+++ b/pcs/test/test_quorum.py
@@ -5,17 +5,19 @@ from __future__ import (
 )
 
 import shutil
-from pcs.test.tools.pcs_unittest import TestCase
+from textwrap import dedent
 
 from pcs.test.tools.assertions import AssertPcsMixin
 from pcs.test.tools.misc import (
     get_test_resource as rc,
 )
 from pcs.test.tools.pcs_runner import PcsRunner
+from pcs.test.tools.pcs_unittest import TestCase
 
 
 coro_conf = rc("corosync.conf")
 coro_qdevice_conf = rc("corosync-3nodes-qdevice.conf")
+coro_qdevice_heuristics_conf = rc("corosync-3nodes-qdevice-heuristics.conf")
 temp_conf = rc("corosync.conf.tmp")
 
 
@@ -27,6 +29,9 @@ class TestBase(TestCase, AssertPcsMixin):
     def fixture_conf_qdevice(self):
         shutil.copy(coro_qdevice_conf, temp_conf)
 
+    def fixture_conf_qdevice_heuristics(self):
+        shutil.copy(coro_qdevice_heuristics_conf, temp_conf)
+
 
 class QuorumConfigTest(TestBase):
     def test_no_device(self):
@@ -90,56 +95,49 @@ Options:
 
 class DeviceAddTest(TestBase):
     def test_no_model_keyword(self):
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device add option=value host=127.0.0.1",
             stdout_start="\nUsage: pcs quorum <command>\n    device add "
         )
 
-        self.assert_pcs_fail(
-            "quorum device add option=value host=127.0.0.1 --force",
-            stdout_start="\nUsage: pcs quorum <command>\n    device add "
-        )
-
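The change above collapses each pair of assert_pcs_fail calls (one plain, one
with --force appended) into a single assert_pcs_fail_regardless_of_force call.
The helper itself is not part of this hunk; a minimal sketch of its presumed
behavior, assuming it sits next to AssertPcsMixin in
pcs/test/tools/assertions.py, is:

    # Sketch only; the real helper is defined in the test tools, not here.
    # The command must fail with the same expected output both with and
    # without --force appended.
    def assert_pcs_fail_regardless_of_force(self, command, *args, **kwargs):
        self.assert_pcs_fail(command, *args, **kwargs)
        self.assert_pcs_fail(command + " --force", *args, **kwargs)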
     def test_no_model_value(self):
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device add option=value model host=127.0.0.1",
             stdout_start="\nUsage: pcs quorum <command>\n    device add "
         )
-        self.assert_pcs_fail(
-            "quorum device add option=value model host=127.0.0.1 --force",
-            stdout_start="\nUsage: pcs quorum <command>\n    device add "
-        )
 
     def test_more_models(self):
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device add model net host=127.0.0.1 model disk",
-            stdout_start="\nUsage: pcs quorum <command>\n    device add "
-        )
-        self.assert_pcs_fail(
-            "quorum device add model net host=127.0.0.1 model disk --force",
-            stdout_start="\nUsage: pcs quorum <command>\n    device add "
+            "Error: 'model' cannot be used more than once\n"
         )
 
     def test_model_in_options(self):
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device add model=disk model net host=127.0.0.1",
             "Error: Model cannot be specified in generic options\n"
         )
-        self.assert_pcs_fail(
-            "quorum device add model=disk model net host=127.0.0.1 --force",
-            "Error: Model cannot be specified in generic options\n"
+
+    def test_more_heuristics(self):
+        self.assert_pcs_fail_regardless_of_force(
+            "quorum device add model net host=127.0.0.1 heuristics mode=on "
+                "heuristics 'exec_ls=test -f /tmp/test'"
+            ,
+            "Error: 'heuristics' cannot be used more than once\n"
+        )
+
+    def test_bad_keyword(self):
+        self.assert_pcs_fail_regardless_of_force(
+            "quorum device add model net host=127.0.0.1 heuristic mode=on",
+            "Error: missing value of 'heuristic' option\n"
         )
 
     def test_device_already_set(self):
         self.fixture_conf_qdevice()
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device add model net host=127.0.0.1",
             "Error: quorum device is already defined\n"
         )
-        self.assert_pcs_fail(
-            "quorum device add model net host=127.0.0.1 --force",
-            "Error: quorum device is already defined\n"
-        )
 
     def test_success_model_only(self):
         self.assert_pcs_success(
@@ -147,74 +145,149 @@ class DeviceAddTest(TestBase):
         )
         self.assert_pcs_success(
             "quorum config",
-            """\
-Options:
-Device:
-  Model: net
-    algorithm: lms
-    host: 127.0.0.1
-"""
+            dedent("""\
+                Options:
+                Device:
+                  Model: net
+                    algorithm: lms
+                    host: 127.0.0.1
+                """
+            )
         )
 
     def test_succes_generic_and_model_options(self):
         self.assert_pcs_success(
-            "quorum device add timeout=12345 model net host=127.0.0.1 algorithm=ffsplit"
+            "quorum device add timeout=12345 model net host=127.0.0.1 "
+                "algorithm=ffsplit"
         )
         self.assert_pcs_success(
             "quorum config",
-            """\
-Options:
-Device:
-  timeout: 12345
-  votes: 1
-  Model: net
-    algorithm: ffsplit
-    host: 127.0.0.1
-"""
+            dedent("""\
+                Options:
+                Device:
+                  timeout: 12345
+                  votes: 1
+                  Model: net
+                    algorithm: ffsplit
+                    host: 127.0.0.1
+                """
+            )
+        )
+
+    def test_succes_model_options_and_heuristics(self):
+        self.assert_pcs_success(
+            "quorum device add model net host=127.0.0.1 algorithm=ffsplit "
+                "heuristics mode=on 'exec_ls=test -f /tmp/test'"
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            dedent("""\
+                Options:
+                Device:
+                  votes: 1
+                  Model: net
+                    algorithm: ffsplit
+                    host: 127.0.0.1
+                  Heuristics:
+                    exec_ls: test -f /tmp/test
+                    mode: on
+                """
+            )
+        )
+
+    def test_succes_model_options_and_heuristics_no_exec(self):
+        self.assert_pcs_success(
+            "quorum device add model net host=127.0.0.1 algorithm=ffsplit "
+                "heuristics mode=on",
+            "Warning: No exec_NAME options are specified, so heuristics are "
+                "effectively disabled\n"
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            dedent("""\
+                Options:
+                Device:
+                  votes: 1
+                  Model: net
+                    algorithm: ffsplit
+                    host: 127.0.0.1
+                  Heuristics:
+                    mode: on
+                """
+            )
+        )
+
+    def test_succes_all_options(self):
+        self.assert_pcs_success(
+            "quorum device add timeout=12345 model net host=127.0.0.1 "
+                "algorithm=ffsplit "
+                "heuristics mode=on 'exec_ls=test -f /tmp/test'"
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            dedent("""\
+                Options:
+                Device:
+                  timeout: 12345
+                  votes: 1
+                  Model: net
+                    algorithm: ffsplit
+                    host: 127.0.0.1
+                  Heuristics:
+                    exec_ls: test -f /tmp/test
+                    mode: on
+                """
+            )
         )
 
     def test_missing_required_options(self):
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device add model net",
             "Error: required options 'algorithm', 'host' are missing\n"
         )
-        self.assert_pcs_fail(
-            "quorum device add model net --force",
-            "Error: required options 'algorithm', 'host' are missing\n"
-        )
 
     def test_bad_options(self):
         self.assert_pcs_fail(
-            "quorum device add a=b timeout=-1 model net host=127.0.0.1 algorithm=x c=d",
+            "quorum device add a=b timeout=-1 model net host=127.0.0.1 "
+                "algorithm=x c=d heuristics mode=bad e=f",
             """\
 Error: 'x' is not a valid algorithm value, use ffsplit, lms, use --force to override
 Error: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker, use --force to override
 Error: invalid quorum device option 'a', allowed options are: sync_timeout, timeout, use --force to override
 Error: '-1' is not a valid timeout value, use positive integer, use --force to override
+Error: 'bad' is not a valid mode value, use off, on, sync, use --force to override
+Error: invalid heuristics option 'e', allowed options are: interval, mode, sync_timeout, timeout and options matching patterns: exec_NAME, use --force to override
 """
         )
 
         self.assert_pcs_success(
-            "quorum device add a=b timeout=-1 model net host=127.0.0.1 algorithm=x c=d --force",
+            "quorum device add a=b timeout=-1 model net host=127.0.0.1 "
+                "algorithm=x c=d heuristics mode=bad e=f --force",
             """\
 Warning: 'x' is not a valid algorithm value, use ffsplit, lms
 Warning: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker
 Warning: invalid quorum device option 'a', allowed options are: sync_timeout, timeout
 Warning: '-1' is not a valid timeout value, use positive integer
+Warning: 'bad' is not a valid mode value, use off, on, sync
+Warning: invalid heuristics option 'e', allowed options are: interval, mode, sync_timeout, timeout and options matching patterns: exec_NAME
 """
         )
         self.assert_pcs_success(
             "quorum config",
-            """\
-Options:
-Device:
-  a: b
-  timeout: -1
-  Model: net
-    algorithm: x
-    c: d
-    host: 127.0.0.1
-"""
+            dedent("""\
+                Options:
+                Device:
+                  a: b
+                  timeout: -1
+                  Model: net
+                    algorithm: x
+                    c: d
+                    host: 127.0.0.1
+                  Heuristics:
+                    e: f
+                    mode: bad
+                """
+            )
         )
 
     def test_bad_model(self):
@@ -228,25 +301,22 @@ Device:
         )
         self.assert_pcs_success(
             "quorum config",
-            """\
-Options:
-Device:
-  Model: invalid
-    x: y
-"""
+            dedent("""\
+                Options:
+                Device:
+                  Model: invalid
+                    x: y
+                """
+            )
         )
 
 
 class DeviceRemoveTest(TestBase):
     def test_no_device(self):
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device remove",
             "Error: no quorum device is defined in this cluster\n"
         )
-        self.assert_pcs_fail(
-            "quorum device remove --force",
-            "Error: no quorum device is defined in this cluster\n"
-        )
 
     def test_success(self):
         self.fixture_conf_qdevice()
@@ -259,39 +329,61 @@ class DeviceRemoveTest(TestBase):
         )
 
     def test_bad_options(self):
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device remove net",
             stdout_start="\nUsage: pcs quorum <command>\n    device remove\n"
         )
-        self.assert_pcs_fail(
-            "quorum device remove net --force",
-            stdout_start="\nUsage: pcs quorum <command>\n    device remove\n"
+
+
+class DeviceHeuristicsRemove(TestBase):
+    def test_no_device(self):
+        self.assert_pcs_fail_regardless_of_force(
+            "quorum device heuristics remove",
+            "Error: no quorum device is defined in this cluster\n"
+        )
+
+    def test_bad_options(self):
+        self.assert_pcs_fail_regardless_of_force(
+            "quorum device heuristics remove option",
+            stdout_start="\nUsage: pcs quorum <command>\n    device heuristics "
+                "remove\n"
+        )
+
+    def test_success(self):
+        self.fixture_conf_qdevice_heuristics()
+        self.assert_pcs_success("quorum device heuristics remove")
+        self.assert_pcs_success(
+            "quorum config",
+            dedent("""\
+                Options:
+                Device:
+                  Model: net
+                    host: 127.0.0.1
+                """
+            )
         )
 
 
 class DeviceUpdateTest(TestBase):
     def test_no_device(self):
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device update option=new_value model host=127.0.0.2",
             "Error: no quorum device is defined in this cluster\n"
         )
-        self.assert_pcs_fail(
-            "quorum device update option=new_value model host=127.0.0.2 --force",
-            "Error: no quorum device is defined in this cluster\n"
-        )
 
     def test_generic_options_change(self):
         self.fixture_conf_qdevice()
         self.assert_pcs_success("quorum device update timeout=12345")
         self.assert_pcs_success(
             "quorum config",
-            """\
-Options:
-Device:
-  timeout: 12345
-  Model: net
-    host: 127.0.0.1
-"""
+            dedent("""\
+                Options:
+                Device:
+                  timeout: 12345
+                  Model: net
+                    host: 127.0.0.1
+                """
+            )
         )
 
     def test_model_options_change(self):
@@ -299,61 +391,108 @@ Device:
         self.assert_pcs_success("quorum device update model host=127.0.0.2")
         self.assert_pcs_success(
             "quorum config",
-            """\
-Options:
-Device:
-  Model: net
-    host: 127.0.0.2
-"""
+            dedent("""\
+                Options:
+                Device:
+                  Model: net
+                    host: 127.0.0.2
+                """
+            )
         )
 
-    def test_both_options_change(self):
+    def test_heuristic_options_change(self):
         self.fixture_conf_qdevice()
         self.assert_pcs_success(
-            "quorum device update timeout=12345 model host=127.0.0.2 port=1"
+            "quorum device update heuristics mode=on 'exec_ls=test -f /tmp/tst'"
         )
         self.assert_pcs_success(
             "quorum config",
-            """\
-Options:
-Device:
-  timeout: 12345
-  Model: net
-    host: 127.0.0.2
-    port: 1
-"""
+            dedent("""\
+                Options:
+                Device:
+                  Model: net
+                    host: 127.0.0.1
+                  Heuristics:
+                    exec_ls: test -f /tmp/tst
+                    mode: on
+                """
+            )
+        )
+
+    def test_heuristic_options_change_no_exec(self):
+        self.fixture_conf_qdevice()
+        self.assert_pcs_success(
+            "quorum device update heuristics mode=on",
+            "Warning: No exec_NAME options are specified, so heuristics are "
+                "effectively disabled\n"
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            dedent("""\
+                Options:
+                Device:
+                  Model: net
+                    host: 127.0.0.1
+                  Heuristics:
+                    mode: on
+                """
+            )
+        )
+
+    def test_all_options_change(self):
+        self.fixture_conf_qdevice()
+        self.assert_pcs_success(
+            "quorum device update timeout=12345 model host=127.0.0.2 port=1 "
+            "heuristics mode=on 'exec_ls=test -f /tmp/test'"
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            dedent("""\
+                Options:
+                Device:
+                  timeout: 12345
+                  Model: net
+                    host: 127.0.0.2
+                    port: 1
+                  Heuristics:
+                    exec_ls: test -f /tmp/test
+                    mode: on
+                """
+            )
+        )
+
+    def test_more_heuristics(self):
+        self.assert_pcs_fail_regardless_of_force(
+            "quorum device update model host=127.0.0.1 heuristics mode=on "
+                "heuristics 'exec_ls=test -f /tmp/test'"
+            ,
+            "Error: 'heuristics' cannot be used more than once\n"
+        )
+
+    def test_bad_keyword(self):
+        self.assert_pcs_fail_regardless_of_force(
+            "quorum device update model host=127.0.0.1 heuristic mode=on",
+            "Error: missing value of 'heuristic' option\n"
         )
 
     def test_more_models(self):
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device update model host=127.0.0.2 model port=1",
-            stdout_start="\nUsage: pcs quorum <command>\n    device update "
-        )
-        self.assert_pcs_fail(
-            "quorum device update model host=127.0.0.2 model port=1 --force",
-            stdout_start="\nUsage: pcs quorum <command>\n    device update "
+            "Error: 'model' cannot be used more than once\n"
         )
 
     def test_model_in_options(self):
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device update model=disk",
             "Error: Model cannot be specified in generic options\n"
         )
-        self.assert_pcs_fail(
-            "quorum device update model=disk --force",
-            "Error: Model cannot be specified in generic options\n"
-        )
 
     def test_missing_required_options(self):
         self.fixture_conf_qdevice()
-        self.assert_pcs_fail(
+        self.assert_pcs_fail_regardless_of_force(
             "quorum device update model host=",
             "Error: required option 'host' is missing\n"
         )
-        self.assert_pcs_fail(
-            "quorum device update model host= --force",
-            "Error: required option 'host' is missing\n"
-        )
 
     def test_bad_options(self):
         self.fixture_conf_qdevice()
@@ -377,14 +516,15 @@ Warning: '-1' is not a valid timeout value, use positive integer
         )
         self.assert_pcs_success(
             "quorum config",
-            """\
-Options:
-Device:
-  a: b
-  timeout: -1
-  Model: net
-    c: d
-    host: 127.0.0.1
-    port: x
-"""
+            dedent("""\
+                Options:
+                Device:
+                  a: b
+                  timeout: -1
+                  Model: net
+                    c: d
+                    host: 127.0.0.1
+                    port: x
+                """
+            )
         )
diff --git a/pcs/test/tools/command_env/assistant.py b/pcs/test/tools/command_env/assistant.py
index 6d8d607..a3fef28 100644
--- a/pcs/test/tools/command_env/assistant.py
+++ b/pcs/test/tools/command_env/assistant.py
@@ -8,13 +8,17 @@ import logging
 from functools import partial
 
 from pcs.lib.env import LibraryEnvironment
-from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.assertions import assert_raise_library_error, prepare_diff
 from pcs.test.tools.command_env.calls import Queue as CallQueue
 from pcs.test.tools.command_env.config import Config
 from pcs.test.tools.command_env.mock_push_cib import(
     get_push_cib,
     is_push_cib_call_in,
 )
+from pcs.test.tools.command_env.mock_push_corosync_conf import(
+    get_push_corosync_conf,
+    is_push_corosync_conf_call_in,
+)
 from pcs.test.tools.command_env.mock_runner import Runner
 from pcs.test.tools.command_env.mock_get_local_corosync_conf import(
     get_get_local_corosync_conf
@@ -34,11 +38,14 @@ def patch_env(call_queue, config, init_env):
     #by accident. Such a test would fail on a different machine (with another live
     #environment)
 
+    get_cmd_runner = init_env.cmd_runner
+    get_node_communicator = init_env.get_node_communicator
+
     patcher_list = [
         patch_lib_env(
             "cmd_runner",
             lambda env:
-            spy.Runner(init_env.cmd_runner()) if config.spy else Runner(
+            spy.Runner(get_cmd_runner()) if config.spy else Runner(
                 call_queue,
                 env_vars={} if not config.env.cib_tempfile else {
                     "CIB_file": config.env.cib_tempfile,
@@ -56,17 +63,24 @@ def patch_env(call_queue, config, init_env):
             "get_node_communicator",
             lambda env:
                 NodeCommunicator(call_queue) if not config.spy
-                else spy.NodeCommunicator(init_env.get_node_communicator())
+                else spy.NodeCommunicator(get_node_communicator())
         )
     ]
 
-    #It is not always desirable to patch the method push_cib. Some tests can
-    #patch only the internals (runner...). So push_cib is patched only when it
-    #is explicitly configured
+    # It is not always desirable to patch these methods. Some tests may patch
+    # only the internals (runner etc.). So these methods are only patched when
+    # it is explicitly configured.
     if is_push_cib_call_in(call_queue):
         patcher_list.append(
             patch_lib_env("push_cib", get_push_cib(call_queue))
         )
+    if is_push_corosync_conf_call_in(call_queue):
+        patcher_list.append(
+            patch_lib_env(
+                "push_corosync_conf",
+                get_push_corosync_conf(call_queue)
+            )
+        )
 
     for patcher in patcher_list:
         patcher.start()
@@ -96,6 +110,7 @@ class EnvAssistant(object):
         )
 
         self.__unpatch = None
+        self.__original_mocked_corosync_conf = None
 
         if test_case:
             test_case.addCleanup(self.cleanup)
@@ -126,6 +141,29 @@ class EnvAssistant(object):
                         repr(call) for call in self.__call_queue.remaining
                     ]))
                 )
+            # If pushing corosync.conf has not been patched in the
+            # LibraryEnvironment and the LibraryEnvironment was constructed
+            # with a mocked corosync.conf, check if it was changed without the
+            # change being specified in a test.
+            # If no env.push_corosync_conf call has been specified, no mocking
+            # occurs; any changes to corosync.conf are done just in memory and
+            # nothing gets reported. So an explicit check is necessary.
+            corosync_conf_orig = self.__original_mocked_corosync_conf
+            corosync_conf_env = self._env._corosync_conf_data
+            if (
+                corosync_conf_orig
+                and
+                corosync_conf_orig != corosync_conf_env
+            ):
+                raise AssertionError(
+                    (
+                        "An unexpected change to corosync.conf in "
+                        "LibraryEnvironment has been detected:\n{0}"
+                    ).format(
+                        prepare_diff(corosync_conf_orig, corosync_conf_env)
+                    )
+                )
+
 
     def get_env(self):
         self.__call_queue = CallQueue(self.__config.calls)
@@ -143,6 +181,13 @@ class EnvAssistant(object):
             )
         )
         self.__unpatch = patch_env(self.__call_queue, self.__config, self._env)
+        # If pushing corosync.conf has not been patched in the
+        # LibraryEnvironment, store any corosync.conf passed to the
+        # LibraryEnvironment so it can be checked for changes in cleanup.
+        if not is_push_corosync_conf_call_in(self.__call_queue):
+            self.__original_mocked_corosync_conf = (
+                self.__config.env.corosync_conf_data
+            )
         return self._env
 
     def assert_reports(self, reports):
diff --git a/pcs/test/tools/command_env/config.py b/pcs/test/tools/command_env/config.py
index dbe6d1a..fa1fd46 100644
--- a/pcs/test/tools/command_env/config.py
+++ b/pcs/test/tools/command_env/config.py
@@ -11,7 +11,7 @@ from pcs.test.tools.command_env.config_runner import RunnerConfig
 from pcs.test.tools.command_env.config_http import HttpConfig
 
 class Spy(object):
-    def __init__(self, auth_tokens=None, ports=None):
+    def __init__(self, auth_tokens, ports=None):
         self.auth_tokens = auth_tokens
         self.ports = ports
 
diff --git a/pcs/test/tools/command_env/config_corosync_conf.py b/pcs/test/tools/command_env/config_corosync_conf.py
index dd2e07c..cadea80 100644
--- a/pcs/test/tools/command_env/config_corosync_conf.py
+++ b/pcs/test/tools/command_env/config_corosync_conf.py
@@ -18,9 +18,9 @@ class CorosyncConf(object):
 
     def load(
         self, node_name_list=None, name="corosync_conf.load",
-        auto_tie_breaker=None
+        filename="corosync.conf", auto_tie_breaker=None
     ):
-        content = open(rc("corosync.conf")).read()
+        content = open(rc(filename)).read()
         corosync_conf = None
         if node_name_list:
             corosync_conf = ConfigFacade.from_string(content).config
diff --git a/pcs/test/tools/command_env/config_env.py b/pcs/test/tools/command_env/config_env.py
index 9f52629..5aaec15 100644
--- a/pcs/test/tools/command_env/config_env.py
+++ b/pcs/test/tools/command_env/config_env.py
@@ -5,6 +5,9 @@ from __future__ import (
 )
 
 from pcs.test.tools.command_env.mock_push_cib import Call as PushCibCall
+from pcs.test.tools.command_env.mock_push_corosync_conf import (
+    Call as PushCorosyncConfCall,
+)
 from pcs.test.tools.fixture_cib import modify_cib
 
 
@@ -82,3 +85,14 @@ class EnvConfig(object):
             ),
             instead=instead
         )
+
+    def push_corosync_conf(
+        self, name="env.push_corosync_conf", corosync_conf_text="",
+        skip_offline_targets=False, before=None, instead=None
+    ):
+        self.__calls.place(
+            name,
+            PushCorosyncConfCall(corosync_conf_text, skip_offline_targets),
+            instead=instead,
+            before=before
+        )
diff --git a/pcs/test/tools/command_env/config_runner_corosync.py b/pcs/test/tools/command_env/config_runner_corosync.py
index 63fc984..da8a46b 100644
--- a/pcs/test/tools/command_env/config_runner_corosync.py
+++ b/pcs/test/tools/command_env/config_runner_corosync.py
@@ -11,7 +11,13 @@ class CorosyncShortcuts(object):
     def __init__(self, calls):
         self.__calls = calls
 
-    def version(self, name="runner.corosync.version", version="2.4.0"):
+    def version(
+        self,
+        name="runner.corosync.version",
+        version="2.4.0",
+        instead=None,
+        before=None
+    ):
         self.__calls.place(
             name,
             RunnerCall(
@@ -22,10 +28,17 @@ class CorosyncShortcuts(object):
                     Copyright...
                     """.format(version)
                 )
-            )
+            ),
+            before=before,
+            instead=instead
         )
 
-    def reload(self, name="runner.corosync.reload"):
+    def reload(
+        self,
+        name="runner.corosync.reload",
+        instead=None,
+        before=None
+    ):
         self.__calls.place(
             name,
             RunnerCall(
@@ -36,5 +49,7 @@ class CorosyncShortcuts(object):
                     Done
                     """
                 )
-            )
+            ),
+            before=before,
+            instead=instead
         )
diff --git a/pcs/test/tools/command_env/config_runner_pcmk.py b/pcs/test/tools/command_env/config_runner_pcmk.py
index eb4cb5f..6499ef8 100644
--- a/pcs/test/tools/command_env/config_runner_pcmk.py
+++ b/pcs/test/tools/command_env/config_runner_pcmk.py
@@ -70,6 +70,47 @@ class PcmkShortcuts(object):
             instead=instead,
         )
 
+    def resource_cleanup(
+        self,
+        name="runner.pcmk.cleanup",
+        instead=None,
+        before=None,
+        resource=None,
+        node=None,
+        stdout="",
+        stderr="",
+        returncode=0
+    ):
+        """
+        Create a call for crm_resource --cleanup
+
+        string name -- the key of this call
+        string instead -- the key of a call instead of which this new call is to
+            be placed
+        string before -- the key of a call before which this new call is to be
+            placed
+        string resource -- the id of a resource to be cleaned
+        string node -- the name of the node where resources should be cleaned
+        string stdout -- crm_resource's stdout
+        string stderr -- crm_resource's stderr
+        int returncode -- crm_resource's returncode
+        """
+        cmd = ["crm_resource", "--cleanup"]
+        if resource:
+            cmd.extend(["--resource", resource])
+        if node:
+            cmd.extend(["--node", node])
+        self.__calls.place(
+            name,
+            RunnerCall(
+                " ".join(cmd),
+                stdout=stdout,
+                stderr=stderr,
+                returncode=returncode
+            ),
+            before=before,
+            instead=instead,
+        )
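This shortcut lets a test declare the exact crm_resource --cleanup invocation
it expects. A rough usage sketch follows; the config.runner.pcmk attribute
path and the EnvAssistant(test_case=...) call are inferred from the default
call name "runner.pcmk.cleanup" and from the assistant code, so treat them as
assumptions:

    from pcs.test.tools.command_env.assistant import EnvAssistant

    # Inside a TestCase method; resource and node names are placeholders.
    env_assist = EnvAssistant(test_case=self)
    env_assist.config.runner.pcmk.resource_cleanup(
        resource="dummy",  # expects: crm_resource --cleanup --resource dummy
        node="node-1",     # expects: ... --node node-1
    )
    # The command under test then runs against env_assist.get_env(); the
    # mocked runner fails the test unless exactly this call is made.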
 
     def wait(
         self, name="runner.pcmk.wait", stderr="", returncode=None, timeout=None
diff --git a/pcs/test/tools/command_env/mock_push_cib.py b/pcs/test/tools/command_env/mock_push_cib.py
index 6776747..6cfdb4f 100644
--- a/pcs/test/tools/command_env/mock_push_cib.py
+++ b/pcs/test/tools/command_env/mock_push_cib.py
@@ -29,15 +29,15 @@ def get_push_cib(call_queue):
         if custom_cib is None and expected_call.custom_cib:
             raise AssertionError(
                 (
-                    "Trying to call push cib (call no. {0}) without custom cib,"
-                    " but a custom cib was expected"
+                    "Trying to call env.push_cib (call no. {0}) without "
+                    "a custom cib but a custom cib was expected"
                 ).format(i)
             )
         if custom_cib is not None and not expected_call.custom_cib:
             raise AssertionError(
                 (
-                    "Trying to call push cib (call no. {0}) with custom cib,"
-                    " but no custom cib was expected"
+                    "Trying to call env.push_cib (call no. {0}) with a custom "
+                    "cib but no custom cib was expected"
                 ).format(i)
             )
 
@@ -45,7 +45,7 @@ def get_push_cib(call_queue):
             expected_call.cib_xml,
             etree_to_str(lib_env.cib),
             (
-                "Trying to call env.push cib (call no. {0}) but cib in env does"
+                "Trying to call env.push_cib (call no. {0}) but cib in env does"
                 " not match\n\n"
             ).format(i)
         )
@@ -53,8 +53,8 @@ def get_push_cib(call_queue):
         if wait != expected_call.wait:
             raise AssertionError(
                 (
-                    "Trying to call push cib (call no. {0}) with 'wait' == {1}"
-                    " but expected was 'wait' == {2}"
+                    "Trying to call env.push_cib (call no. {0}) with 'wait' == "
+                    "{1} but it was expected 'wait' == {2}"
                 ).format(i, wait, expected_call.wait)
             )
 
diff --git a/pcs/test/tools/command_env/mock_push_corosync_conf.py b/pcs/test/tools/command_env/mock_push_corosync_conf.py
new file mode 100644
index 0000000..4d5d62e
--- /dev/null
+++ b/pcs/test/tools/command_env/mock_push_corosync_conf.py
@@ -0,0 +1,50 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+)
+
+from pcs.lib.corosync.config_facade import ConfigFacade
+from pcs.test.tools.assertions import prepare_diff
+
+CALL_TYPE_PUSH_COROSYNC_CONF = "CALL_TYPE_PUSH_COROSYNC_CONF"
+
+class Call(object):
+    type = CALL_TYPE_PUSH_COROSYNC_CONF
+
+    def __init__(self, corosync_conf_text, skip_offline_targets):
+        self.corosync_conf_text = corosync_conf_text
+        self.skip_offline_targets = skip_offline_targets
+
+    def __repr__(self):
+        return str("<CorosyncConfPush skip-offline='{0}'>").format(
+            self.skip_offline_targets
+        )
+
+def get_push_corosync_conf(call_queue):
+    def push_corosync_conf(
+        lib_env, corosync_conf_facade, skip_offline_nodes=False
+    ):
+        i, expected_call = call_queue.take(CALL_TYPE_PUSH_COROSYNC_CONF)
+
+        if not isinstance(corosync_conf_facade, ConfigFacade):
+            raise AssertionError(
+                (
+                    "Trying to call env.push_corosync_conf (call no. {0}) with"
+                    " {1} instead of lib.corosync.config_facade.ConfigFacade"
+                ).format(i, type(corosync_conf_facade))
+            )
+
+        to_push = corosync_conf_facade.config.export()
+        if to_push != expected_call.corosync_conf_text:
+            raise AssertionError(
+                "Trying to call env.push_corosync_conf but the pushed "
+                "corosync.conf is not as expected:\n{0}".format(
+                    prepare_diff(to_push, expected_call.corosync_conf_text)
+                )
+            )
+
+    return push_corosync_conf
+
+def is_push_corosync_conf_call_in(call_queue):
+    return call_queue.has_type(CALL_TYPE_PUSH_COROSYNC_CONF)
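Together with the EnvConfig.push_corosync_conf shortcut and the patching in
assistant.py above, this mock lets a test state the exact corosync.conf
content a library command is expected to push. A rough sketch, with the
config.corosync_conf and config.env attribute paths inferred from the call
names "corosync_conf.load" and "env.push_corosync_conf", and expected_conf
standing in for the full expected file text:

    from pcs.test.tools.command_env.assistant import EnvAssistant

    # Illustrative sketch, inside a TestCase method.
    env_assist = EnvAssistant(test_case=self)
    env_assist.config.corosync_conf.load(
        filename="corosync-3nodes-qdevice.conf"
    )
    env_assist.config.env.push_corosync_conf(corosync_conf_text=expected_conf)
    # Run the library command against env_assist.get_env(); the mock compares
    # the pushed ConfigFacade's config.export() to expected_conf and raises an
    # AssertionError with a diff when they differ. When no push_corosync_conf
    # call is declared at all, EnvAssistant.cleanup() instead checks that the
    # mocked corosync.conf was not modified behind the test's back.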
diff --git a/pcs/test/tools/command_env/mock_runner.py b/pcs/test/tools/command_env/mock_runner.py
index 6876d7a..10787f1 100644
--- a/pcs/test/tools/command_env/mock_runner.py
+++ b/pcs/test/tools/command_env/mock_runner.py
@@ -55,6 +55,9 @@ COMMAND_COMPLETIONS = {
     "corosync-cfgtool": path.join(
         settings.corosync_binaries, "corosync-cfgtool"
     ),
+    "corosync-qdevice-net-certutil": path.join(
+        settings.corosync_binaries, "corosync-qdevice-net-certutil"
+    ),
     "crm_diff": path.join(settings.pacemaker_binaries, "crm_diff"),
     "crm_mon": path.join(settings.pacemaker_binaries, "crm_mon"),
     "crm_resource": path.join(settings.pacemaker_binaries, "crm_resource"),
diff --git a/pcs/usage.py b/pcs/usage.py
index 7481014..090a150 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -474,13 +474,23 @@ Commands:
         override them with their own defined values.
 
     cleanup [<resource id>] [--node <node>]
-        Make the cluster forget the operation history of the resource and
-        re-detect its current state. This can be useful to purge knowledge of
-        past failures that have since been resolved. If a resource id is not
-        specified then all resources/stonith devices will be cleaned up. If a
-        node is not specified then resources/stonith devices on all nodes will
+        Make the cluster forget failed operations from the history of the resource
+        and re-detect its current state. This can be useful to purge knowledge
+        of past failures that have since been resolved. If a resource id is not
+        specified then all resources / stonith devices will be cleaned up. If a
+        node is not specified then resources / stonith devices on all nodes will
         be cleaned up.
 
+    refresh [<resource id>] [--node <node>] [--full]
+        Make the cluster forget the complete operation history (including
+        failures) of the resource and re-detect its current state. If you are
+        interested in forgetting failed operations only, use the 'pcs resource
+        cleanup' command. If a resource id is not specified then all resources
+        / stonith devices will be refreshed. If a node is not specified then
+        resources / stonith devices on all nodes will be refreshed. Use --full
+        to refresh a resource on all nodes, otherwise only nodes where the
+        resource's state is known will be considered.
+
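In short, "cleanup" now clears only failed operations while the new "refresh"
clears the complete operation history. For example (resource id and node name
are placeholders):

        pcs resource cleanup dummy --node node-1
        pcs resource refresh dummy --full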
     failcount show <resource id> [node]
         Show current failcount for specified resource from all nodes or
         only on specified node.
@@ -877,12 +887,23 @@ Commands:
         not specified it defaults to 60 minutes.
 
     cleanup [<stonith id>] [--node <node>]
-        Make the cluster forget the operation history of the stonith device and
-        re-detect its current state. This can be useful to purge knowledge of
-        past failures that have since been resolved. If a stonith id is not
-        specified then all resources/stonith devices will be cleaned up. If a
-        node is not specified then resources/stonith devices on all nodes will
-        be cleaned up.
+        Make the cluster forget failed operations from the history of the stonith
+        device and re-detect its current state. This can be useful to purge
+        knowledge of past failures that have since been resolved. If a stonith
+        id is not specified then all resources / stonith devices will be cleaned
+        up. If a node is not specified then resources / stonith devices on all
+        nodes will be cleaned up.
+
+    refresh [<stonith id>] [--node <node>] [--full]
+        Make the cluster forget the complete operation history (including
+        failures) of the stonith device and re-detect its current state. If you
+        are interested in forgetting failed operations only, use the 'pcs
+        stonith cleanup' command. If a stonith id is not specified then all
+        resources / stonith devices will be refreshed. If a node is not
+        specified then resources / stonith devices on all nodes will be
+        refreshed. Use --full to refresh a stonith device on all nodes,
+        otherwise only nodes where the stonith device's state is known will be
+        considered.
 
     level [config]
         Lists all of the fencing levels currently configured.
@@ -1526,11 +1547,16 @@ Commands:
         Show quorum runtime status.
 
     device add [<generic options>] model <device model> [<model options>]
+            [heuristics <heuristics options>]
         Add a quorum device to the cluster. Quorum device needs to be created
         first by "pcs qdevice setup" command. It is not possible to use more
-        than one quorum device in a cluster simultaneously. Generic options,
-        model and model options are all documented in corosync-qdevice(8) man
-        page.
+        than one quorum device in a cluster simultaneously. Models and options
+        are all documented in corosync-qdevice(8) man page; for heuristics
+        options check the quorum.device.heuristics subkey section, for model
+        options check the quorum.device.<device model> subkey sections.
+
+    device heuristics remove
+        Remove all heuristics settings of the configured quorum device.
 
     device remove
         Remove a quorum device from the cluster.
@@ -1540,9 +1566,12 @@ Commands:
         output.
 
     device update [<generic options>] [model <model options>]
-        Add/Change quorum device options. Generic options and model options are
-        all documented in corosync-qdevice(8) man page. Requires the cluster to
-        be stopped.
+            [heuristics <heuristics options>]
+        Add/Change quorum device options. Requires the cluster to be stopped.
+        Models and options are all documented in corosync-qdevice(8) man page;
+        for heuristics options check the quorum.device.heuristics subkey
+        section, for model options check the quorum.device.<device model> subkey
+        sections.
 
         WARNING: If you want to change "host" option of qdevice model net, use
         "pcs quorum device remove" and "pcs quorum device add" commands
diff --git a/pcs/utils.py b/pcs/utils.py
index cbaa7a4..5b60823 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -1043,18 +1043,11 @@ def cmd_runner():
     env_vars.update(os.environ)
     env_vars["LC_ALL"] = "C"
     return CommandRunner(
-        logging.getLogger("old_cli"),
+        logging.getLogger("pcs"),
         get_report_processor(),
         env_vars
     )
 
-def get_pcsd_dir():
-    pcs_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
-    if pcs_dir == "/usr/sbin":
-        return settings.pcsd_exec_location
-    else:
-        return os.path.join(pcs_dir, '../pcsd')
-
 def run_pcsdcli(command, data=None):
     if not data:
         data = dict()
@@ -1065,7 +1058,7 @@ def run_pcsdcli(command, data=None):
         env_var["PCSD_NETWORK_TIMEOUT"] = str(pcs_options["--request-timeout"])
     else:
         env_var["PCSD_NETWORK_TIMEOUT"] = str(settings.default_request_timeout)
-    pcsd_dir_path = get_pcsd_dir()
+    pcsd_dir_path = settings.pcsd_exec_location
     pcsdcli_path = os.path.join(pcsd_dir_path, 'pcsd-cli.rb')
     gem_home = os.path.join(pcsd_dir_path, 'vendor/bundle/ruby')
     env_var["GEM_HOME"] = gem_home
@@ -2788,7 +2781,7 @@ def get_lib_env():
             err("Unable to read %s: %s" % (conf, e.strerror))
 
     return LibraryEnvironment(
-        logging.getLogger("old_cli"),
+        logging.getLogger("pcs"),
         get_report_processor(),
         user,
         groups,
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index 3fac787..c47b1c4 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -49,7 +49,7 @@ def get_pcs_path()
   end
 end
 
-PCS_VERSION = '0.9.161'
+PCS_VERSION = '0.9.162'
 # unique instance signature, allows detection of daemon restarts
 DAEMON_INSTANCE_SIGNATURE = Digest::SHA2.hexdigest("#{Time.now} #{rand()}")
 COROSYNC = COROSYNC_BINARIES + "corosync"
@@ -89,3 +89,33 @@ def configure_logger(log_device)
   return logger
 end
 
+def get_capabilities(logger)
+  capabilities = []
+  capabilities_pcsd = []
+  begin
+    filename = (get_pcsd_path() + Pathname.new('capabilities.xml')).to_s
+    capabilities_xml = REXML::Document.new(File.new(filename))
+    capabilities_xml.elements.each('.//capability') { |feat_xml|
+      feat = {}
+      feat_xml.attributes.each() { |name, value|
+        feat[name] = value
+      }
+      feat['description'] = ''
+      if feat_xml.elements['description']
+        feat['description'] = feat_xml.elements['description'].text.strip
+      end
+      capabilities << feat
+    }
+    capabilities.each { |feat|
+      if feat['in-pcsd'] == '1'
+        capabilities_pcsd << feat['id']
+      end
+    }
+  rescue => e
+    logger.error(
+      "Cannot read capabilities definition file '#{filename}': '#{e}'"
+    )
+    return [], []
+  end
+  return capabilities, capabilities_pcsd
+end
diff --git a/pcsd/capabilities.xml b/pcsd/capabilities.xml
index ee9a822..3a827d5 100644
--- a/pcsd/capabilities.xml
+++ b/pcsd/capabilities.xml
@@ -232,7 +232,8 @@
       <description>
         Show and change corosync qdevice configuration.
 
-        pcs commands: quorum device (add | remove | update | status)
+        pcs commands: quorum device (add | remove | update | status),
+          quorum config
       </description>
     </capability>
     <capability id="corosync.quorum.device.client" in-pcs="0" in-pcsd="1">
@@ -243,11 +244,20 @@
           qdevice_client_enable, qdevice_client_disable
       </description>
     </capability>
+    <capability id="corosync.quorum.device.heuristics" in-pcs="1" in-pcsd="0">
+      <description>
+        Show and change corosync qdevice heuristics configuration.
+
+        pcs commands: quorum config, quorum device add ... heuristics,
+          quorum device update ... heuristics, quorum device heuristics remove
+      </description>
+    </capability>
     <capability id="corosync.quorum.device.model.net" in-pcs="1" in-pcsd="0">
       <description>
         Show and change corosync qdevice model "net" configuration.
 
-        pcs commands: quorum device (add | remove | update | status)
+        pcs commands: quorum device (add | remove | update | status),
+          quorum config
       </description>
     </capability>
     <capability id="corosync.quorum.device.client.model.net.certificates" in-pcs="0" in-pcsd="1">
@@ -1170,21 +1180,38 @@
     </capability>
     <capability id="pcmk.resource.cleanup" in-pcs="1" in-pcsd="0">
       <description>
-        Forget history of resources and redetect their current state. Optionally
-        specify a resource and/or a node.
+        Delete failed operations from history of resources and redetect their
+        current state. Optionally specify a resource and/or a node.
 
         pcs commands: resource cleanup
       </description>
     </capability>
     <capability id="pcmk.resource.cleanup.one-resource" in-pcs="1" in-pcsd="1">
       <description>
-        Forget history of a specified resource on all nodes and redetect its
-        current state.
+        Delete failed operations from history of a specified resource on all
+        nodes and redetect its current state.
 
         pcs commands: resource cleanup
         daemon urls: resource_cleanup
       </description>
     </capability>
+    <capability id="pcmk.resource.refresh" in-pcs="1" in-pcsd="0">
+      <description>
+        Forget history of resources and redetect their current state. Optionally
+        specify a resource and/or a node.
+
+        pcs commands: resource refresh
+      </description>
+    </capability>
+    <capability id="pcmk.resource.refresh.one-resource" in-pcs="1" in-pcsd="1">
+      <description>
+        Forget history of a specified resource on all nodes and redetect its
+        current state.
+
+        pcs commands: resource refresh
+        daemon urls: resource_refresh
+      </description>
+    </capability>
     <capability id="pcmk.resource.failcount" in-pcs="1" in-pcsd="0">
       <description>
         Show or reset failcount of a specified resource on all nodes or on
@@ -1292,10 +1319,18 @@
     </capability>
     <capability id="pcmk.stonith.cleanup" in-pcs="1" in-pcsd="0">
       <description>
+        Delete failed operations from history of stonith resources and redetect
+        their current state. Optionally specify a resource and/or a node.
+
+        pcs commands: stonith cleanup
+      </description>
+    </capability>
+    <capability id="pcmk.stonith.refresh" in-pcs="1" in-pcsd="0">
+      <description>
         Forget history of stonith resources and redetect their current state.
         Optionally specify a resource and/or a node.
 
-        pcs commands: stonith cleanup
+        pcs commands: stonith refresh
       </description>
     </capability>
     <capability id="pcmk.stonith.levels" in-pcs="1" in-pcsd="1">
diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
index 22850e3..21092c5 100644
--- a/pcsd/cluster_entity.rb
+++ b/pcsd/cluster_entity.rb
@@ -567,7 +567,9 @@ module ClusterEntity
           # 7 == OCF_NOT_RUNNING == The resource is safely stopped.
           next if o.operation == 'monitor' and o.rc_code == 7
           # 8 == OCF_RUNNING_MASTER == The resource is running in master mode.
-          next if 8 == o.rc_code
+          # 193 == PCMK_OCF_UNKNOWN => The resource operation is still in
+          # progress.
+          next if [8, 193].include?(o.rc_code)
           failed_ops << o
           message = "Failed to #{o.operation} #{@id}"
           message += " on #{Time.at(o.last_rc_change).asctime}"
diff --git a/pcsd/pcsd-cli.rb b/pcsd/pcsd-cli.rb
index 1e4ed10..3c1d078 100755
--- a/pcsd/pcsd-cli.rb
+++ b/pcsd/pcsd-cli.rb
@@ -9,6 +9,7 @@ require 'orderedhash'
 require 'bootstrap.rb'
 require 'pcs.rb'
 require 'auth.rb'
+require 'remote.rb'
 
 def cli_format_response(status, text=nil, data=nil)
   response = OrderedHash.new
@@ -31,6 +32,10 @@ PCS = get_pcs_path()
 $logger_device = StringIO.new
 $logger = configure_logger($logger_device)
 
+capabilities, capabilities_pcsd = get_capabilities($logger)
+CAPABILITIES = capabilities.freeze
+CAPABILITIES_PCSD = capabilities_pcsd.freeze
+
 # check and set user
 uid = Process.uid
 if 0 == uid
@@ -118,8 +123,22 @@ allowed_commands = {
       pcsd_restart_nodes(auth_user_, params['nodes'] || [])
     }
   },
+  'node_status' => {
+    'only_superuser' => true,
+    'permissions' => Permissions::FULL,
+    'call' => lambda { |params, auth_user_|
+      return JSON.parse(node_status(
+        {
+          :version => '2',
+          :operations => '1',
+          :skip_auth_check => '1',
+        },
+        {},
+        auth_user_
+      ))
+    }
+  },
 }
-
 if allowed_commands.key?(command)
   begin
     params = JSON.parse(STDIN.read)
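
pcsd-cli.rb now requires remote.rb so it can offer a superuser-only
node_status command that returns the local node status (status version 2,
including operations, with the cross-node auth check skipped). A
hypothetical direct invocation is sketched below; the parameters-as-JSON
on stdin convention is visible in the script itself, but the ruby command
line and install path are assumptions based on how run_pcsdcli() drives
this script, not something spelled out in this hunk:

    import json
    import subprocess

    def pcsd_cli_node_status(pcsd_dir="/usr/lib/pcsd"):
        proc = subprocess.run(
            ["ruby", "-I" + pcsd_dir, pcsd_dir + "/pcsd-cli.rb", "node_status"],
            input=json.dumps({}),   # no extra parameters for this command
            capture_output=True,
            text=True,
        )
        return json.loads(proc.stdout)
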
diff --git a/pcsd/pcsd.8 b/pcsd/pcsd.8
index 9e14b60..a824a48 100644
--- a/pcsd/pcsd.8
+++ b/pcsd/pcsd.8
@@ -1,4 +1,4 @@
-.TH PCSD "8" "November 2017" "pcs 0.9.161" "System Administration Utilities"
+.TH PCSD "8" "November 2017" "pcs 0.9.162" "System Administration Utilities"
 .SH NAME
 pcsd \- pacemaker/corosync configuration system daemon
 
@@ -99,5 +99,6 @@ This JSON file stores authentication tokens which are used to login to remote in
 
 .SH SEE ALSO
 .BR pcs (8)
+.BR pcs_snmp_agent (8)
 
 .BR curl (1)
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index f2dff9d..f97dabc 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -116,32 +116,7 @@ configure do
   $logger = configure_logger('/var/log/pcsd/pcsd.log')
   $semaphore_cfgsync = Mutex.new
 
-  capabilities = []
-  capabilities_pcsd = []
-  begin
-    filename = (get_pcsd_path() + Pathname.new('capabilities.xml')).to_s
-    capabilities_xml = REXML::Document.new(File.new(filename))
-    capabilities_xml.elements.each('.//capability') { |feat_xml|
-      feat = {}
-      feat_xml.attributes.each() { |name, value|
-        feat[name] = value
-      }
-      feat['description'] = ''
-      if feat_xml.elements['description']
-        feat['description'] = feat_xml.elements['description'].text.strip
-      end
-      capabilities << feat
-    }
-    capabilities.each { |feat|
-      if feat['in-pcsd'] == '1'
-        capabilities_pcsd << feat['id']
-      end
-    }
-  rescue => e
-    $logger.error(
-      "Cannot read capabilities definition file '#{filename}': '#{e}'"
-    )
-  end
+  capabilities, capabilities_pcsd = get_capabilities($logger)
   CAPABILITIES = capabilities.freeze
   CAPABILITIES_PCSD = capabilities_pcsd.freeze
 end
@@ -672,7 +647,8 @@ already been added to pcsd.  You may not add two clusters with the same name int
     end
     ports = {}
     node_list.each { |node|
-      ports[node] = (params["port-#{node}"] || '').strip
+      port = (params["port-#{node}"] || '').strip
+      ports[node] = port != '' ? port : nil
     }
     node_results = {}
     online, offline, notauthorized = check_gui_status_of_nodes(
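
The second pcsd.rb hunk also normalizes the per-node port fields coming
from the add-cluster form: an empty or whitespace-only value now becomes
nil rather than an empty string. The same rule as a tiny illustrative
helper:

    def normalize_port(raw_value):
        # Whitespace-only input means "no port given" (nil in the Ruby code).
        port = (raw_value or "").strip()
        return port if port else None
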
diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
index 953ba60..59bafa9 100644
--- a/pcsd/public/js/nodes-ember.js
+++ b/pcsd/public/js/nodes-ember.js
@@ -80,6 +80,9 @@ Pcs = Ember.Application.createWithMixins({
       (this.get("pcsd_capabilities").indexOf("pcmk.resource.manage-unmanage") != -1)
     );
   }.property("available_features", "pcsd_capabilities"),
+  is_supported_resource_refresh_one_resource: function() {
+    return this.get("pcsd_capabilities").indexOf("pcmk.resource.refresh.one-resource") != -1
+  }.property("pcsd_capabilities"),
   is_sbd_running: false,
   is_sbd_enabled: false,
   is_sbd_enabled_or_running: function() {
diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
index bf67a24..b7e9a7a 100644
--- a/pcsd/public/js/pcsd.js
+++ b/pcsd/public/js/pcsd.js
@@ -436,7 +436,7 @@ function verify_remove_nodes(node_id) {
 function verify_remove_resources(resource_id) {
   verify_remove(
     remove_resource, true, "resource_list", "dialog_verify_remove_resources",
-    "resource", "Remove resource(s)", "Resurce Removal", resource_id
+    "resource", "Remove resource(s)", "Resource Removal", resource_id
   );
 }
 
@@ -618,46 +618,76 @@ function disable_resource() {
   Pcs.resourcesContainer.disable_resource(curResource());
 }
 
-function cleanup_resource() {
-  var resource = curResource();
+function resource_stonith_cleanup_refresh(
+  resource, action_button_id, action_url, action_label, full_refresh
+) {
   if (resource == null) {
     return;
   }
-  fade_in_out("#resource_cleanup_link");
+  data = {"resource": resource}
+  if (full_refresh) {
+    data["full"] = 1
+  }
+  fade_in_out(action_button_id);
   ajax_wrapper({
     type: 'POST',
-    url: get_cluster_remote_url() + 'resource_cleanup',
-    data: {"resource": resource},
+    url: get_cluster_remote_url() + action_url,
+    data: data,
     success: function() {
     },
     error: function (xhr, status, error) {
       alert(
-        "Unable to cleanup resource '" + resource + "' "
+        "Unable to " + action_label + " resource '" + resource + "' "
         + ajax_simple_error(xhr, status, error)
       );
     }
   });
 }
 
+function cleanup_resource() {
+  resource_stonith_cleanup_refresh(
+    curResource(),
+    "#resource_cleanup_link",
+    "resource_cleanup",
+    "cleanup"
+  )
+}
+
 function cleanup_stonith() {
-  var resource = curStonith();
-  if (resource == null) {
-    return;
-  }
-  fade_in_out("#stonith_cleanup_link");
-  ajax_wrapper({
-    type: 'POST',
-    url: get_cluster_remote_url() + 'resource_cleanup',
-    data: {"resource": resource},
-    success: function() {
-    },
-    error: function (xhr, status, error) {
-      alert(
-        "Unable to cleanup resource '" + resource + "' "
-        + ajax_simple_error(xhr, status, error)
-      );
-    }
-  });
+  resource_stonith_cleanup_refresh(
+    curStonith(),
+    "#stonith_cleanup_link",
+    "resource_cleanup",
+    "cleanup"
+  )
+}
+
+function refresh_resource(refresh_supported) {
+  resource_stonith_cleanup_refresh(
+    curResource(),
+    "#resource_refresh_link",
+    /* previously, "refresh" was called "cleanup" */
+    refresh_supported ? "resource_refresh" : "resource_cleanup",
+    "refresh",
+    /* no way to set if we want to do the full (on all nodes) refresh from gui
+     * (yet);
+     * moreover the full refresh flag only works for refresh not for cleanup */
+    refresh_supported ? true : false
+  )
+}
+
+function refresh_stonith(refresh_supported) {
+  resource_stonith_cleanup_refresh(
+    curStonith(),
+    "#stonith_refresh_link",
+    /* previously, "refresh" was called "cleanup" */
+    refresh_supported ? "resource_refresh" : "resource_cleanup",
+    "refresh",
+    /* no way to set if we want to do the full (on all nodes) refresh from gui
+     * (yet);
+     * moreover the full refresh flag only works for refresh not for cleanup */
+    refresh_supported ? true : false
+  )
 }
 
 function checkExistingNode() {
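
The refactoring above, driven by the pcmk.resource.refresh.one-resource
capability check added in nodes-ember.js, keeps the GUI usable against
clusters whose pcsd predates the cleanup/refresh split: when the
capability is missing, the Refresh action falls back to the old
resource_cleanup URL (which used to carry the refresh semantics), and the
full flag is only sent when real refresh is available. The selection
logic, restated as an illustrative Python helper:

    def pick_refresh_request(pcsd_capabilities, resource_id):
        data = {"resource": resource_id}
        if "pcmk.resource.refresh.one-resource" in pcsd_capabilities:
            # New daemons: dedicated URL, refresh on all nodes.
            return "resource_refresh", dict(data, full=1)
        # Old daemons: the legacy cleanup URL still behaves like refresh
        # there, and it does not understand the full flag.
        return "resource_cleanup", data
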
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index cd7ec1f..e1e95a8 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -103,6 +103,7 @@ def remote(params, request, auth_user)
       :resource_start => method(:resource_start),
       :resource_stop => method(:resource_stop),
       :resource_cleanup => method(:resource_cleanup),
+      :resource_refresh => method(:resource_refresh),
       :update_resource => method(:update_resource),
       :update_fence_device => method(:update_fence_device),
       :get_avail_resource_agents => method(:get_avail_resource_agents),
@@ -1019,7 +1020,7 @@ def node_status(params, request, auth_user)
       'status?redirected=1',
       false,
       params.select { |k,_|
-        [:version, :operations].include?(k)
+        [:version, :operations, :skip_auth_check].include?(k)
       }
     )
   end
@@ -1040,20 +1041,22 @@ def node_status(params, request, auth_user)
 
   node = ClusterEntity::Node.load_current_node(crm_dom)
 
-  _,_,not_authorized_nodes = check_gui_status_of_nodes(
-    auth_user,
-    status[:known_nodes],
-    false,
-    3
-  )
+  if params[:skip_auth_check] != '1'
+    _,_,not_authorized_nodes = check_gui_status_of_nodes(
+      auth_user,
+      status[:known_nodes],
+      false,
+      3
+    )
 
-  if not_authorized_nodes.length > 0
-    node.warning_list << {
-      :message => 'Not authorized against node(s) ' +
-        not_authorized_nodes.join(', '),
-      :type => 'nodes_not_authorized',
-      :node_list => not_authorized_nodes,
-    }
+    if not_authorized_nodes.length > 0
+      node.warning_list << {
+        :message => 'Not authorized against node(s) ' +
+          not_authorized_nodes.join(', '),
+        :type => 'nodes_not_authorized',
+        :node_list => not_authorized_nodes,
+      }
+    end
   end
 
   version = params[:version] || '1'
@@ -1438,6 +1441,24 @@ def resource_cleanup(params, request, auth_user)
   end
 end
 
+def resource_refresh(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  cmd = [PCS, "resource", "refresh", params[:resource]]
+  if params[:full] == '1'
+    cmd << "--full"
+  end
+  stdout, stderr, retval = run_cmd(auth_user, *cmd)
+  if retval == 0
+    return JSON.generate({"success" => "true"})
+  else
+    return JSON.generate(
+      {"error" => "true", "stdout" => stdout, "stderror" => stderr}
+    )
+  end
+end
+
 def resource_start(params, request, auth_user)
   if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
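
The new resource_refresh handler is a thin wrapper around "pcs resource
refresh": it requires write permission, takes the resource id and an
optional full=1 flag, and reports the command outcome as JSON. A
hypothetical client call is sketched below; the base URL, the /remote/
path prefix and the authentication handling are assumptions, not something
defined in this hunk:

    import requests

    def call_resource_refresh(base_url, resource_id, full=False, **request_kwargs):
        # Pass authentication/TLS options through request_kwargs as needed.
        data = {"resource": resource_id}
        if full:
            data["full"] = "1"
        response = requests.post(
            base_url + "/remote/resource_refresh", data=data, **request_kwargs
        )
        response.raise_for_status()
        # {"success": "true"} on success, otherwise
        # {"error": "true", "stdout": ..., "stderror": ...}.
        return response.json()
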
diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
index 79e6909..000401c 100644
--- a/pcsd/views/main.erb
+++ b/pcsd/views/main.erb
@@ -144,15 +144,29 @@
     {{#if resource.stonith}}
       <div class="xdark sprites" style="float: left"></div>
       <div id="stonith_delete_link" class="link" onclick="verify_remove_fence_devices(curStonith());">Remove</div>
-      <div class="restart sprites" style="float: left"></div>
-      <div id="stonith_cleanup_link" class="link" onclick="cleanup_stonith();">Cleanup</div>
+      {{#if Pcs.is_supported_resource_refresh_one_resource}}
+        <div class="restart sprites" style="float: left"></div>
+        <div id="stonith_cleanup_link" class="link" onclick="cleanup_stonith();">Cleanup</div>
+        <div class="restart sprites" style="float: left"></div>
+        <div id="stonith_refresh_link" class="link" onclick="refresh_stonith(true);">Refresh</div>
+      {{else}}
+        <div class="restart sprites" style="float: left"></div>
+        <div id="stonith_refresh_link" class="link" onclick="refresh_stonith(false);">Refresh</div>
+      {{/if}}
     {{else}}
       <div class="checkdark sprites" style="float: left"></div>
       <div id="resource_start_link" class="link" onclick="enable_resource();">Enable</div>
       <div class="cancel sprites" style="float: left"></div>
       <div id="resource_stop_link" class="link" onclick="disable_resource();">Disable</div>
-      <div class="restart sprites" style="float: left"></div>
-      <div id="resource_cleanup_link" class="link" onclick="cleanup_resource();">Cleanup</div>
+      {{#if Pcs.is_supported_resource_refresh_one_resource}}
+        <div class="restart sprites" style="float: left"></div>
+        <div id="resource_cleanup_link" class="link" onclick="cleanup_resource();">Cleanup</div>
+        <div class="restart sprites" style="float: left"></div>
+        <div id="resource_refresh_link" class="link" onclick="refresh_resource(true);">Refresh</div>
+      {{else}}
+        <div class="restart sprites" style="float: left"></div>
+        <div id="resource_refresh_link" class="link" onclick="refresh_resource(false);">Refresh</div>
+      {{/if}}
       <div class="xdark sprites" style="float: left"></div>
       <div id="resource_delete_link" class="link" onclick="verify_remove_resources(curResource());">Remove</div>
       </div>
diff --git a/pylintrc b/pylintrc
index bb4da37..09df745 100644
--- a/pylintrc
+++ b/pylintrc
@@ -72,7 +72,7 @@ max-locals=47
 # Maximum number of statements in function / method body
 max-statements=160
 # Maximum number of branch for function / method body
-max-branches=63
+max-branches=64
 # Maximum number of public methods for a class (see R0904).
 max-public-methods=115
 # Maximum number of boolean expressions in a if statement (default 5)
diff --git a/setup.py b/setup.py
index 4fa23ef..0e763f4 100644
--- a/setup.py
+++ b/setup.py
@@ -17,7 +17,7 @@ class CleanCommand(Command):
 
 setup(
     name='pcs',
-    version='0.9.161',
+    version='0.9.162',
     description='Pacemaker Configuration System',
     author='Chris Feist',
     author_email='cfeist at redhat.com',
@@ -29,10 +29,13 @@ setup(
         'pcs',
         'test/resources/*.xml',
         'test/resources/*.conf',
+        'test/resources/qdevice-certs/*'
     ]},
+    zip_safe=False,
     entry_points={
         'console_scripts': [
             'pcs = pcs.app:main',
+            'pcs_snmp_agent = pcs.snmp.pcs_snmp_agent:main',
         ],
     },
     cmdclass={

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git


