[Debian-ha-commits] [pcs] 02/10: Imported Upstream version 0.9.151

Valentin Vidic vvidic-guest at moszumanska.debian.org
Fri Jun 17 11:35:30 UTC 2016


This is an automated email from the git hooks/post-receive script.

vvidic-guest pushed a commit to branch master
in repository pcs.

commit d3355620fb93a3e5a14cff0971d5336e75af3657
Author: Valentin Vidic <Valentin.Vidic at CARNet.hr>
Date:   Fri Jun 17 11:33:05 2016 +0200

    Imported Upstream version 0.9.151
---
 .gitignore                                         |    3 +
 .pylintrc                                          |  102 +
 MANIFEST.in                                        |    2 +-
 Makefile                                           |   30 +-
 newversion.py                                      |    7 +-
 pcs/acl.py                                         |   70 +-
 pcs/{pcs.py => app.py}                             |   81 +-
 pcs/bash_completion.sh                             |   38 +
 pcs/cli/__init__.py                                |    0
 pcs/cli/common/__init__.py                         |    0
 pcs/cli/common/completion.py                       |  101 +
 pcs/cli/common/console_report.py                   |   25 +
 pcs/cli/common/env.py                              |   15 +
 pcs/cli/common/errors.py                           |   20 +
 pcs/cli/common/lib_wrapper.py                      |  122 ++
 pcs/cli/common/middleware.py                       |   69 +
 pcs/cli/common/parse_args.py                       |   27 +
 pcs/cli/common/reports.py                          |   80 +
 pcs/cli/common/test/__init__.py                    |    0
 pcs/cli/common/test/test_completion.py             |  166 ++
 pcs/cli/common/test/test_console_report.py         |   22 +
 pcs/cli/common/test/test_lib_wrapper.py            |   35 +
 pcs/cli/common/test/test_middleware.py             |   40 +
 pcs/cli/common/test/test_parse_args.py             |   44 +
 pcs/cli/constraint/__init__.py                     |    0
 pcs/cli/constraint/command.py                      |   70 +
 pcs/cli/constraint/console_report.py               |   58 +
 pcs/cli/constraint/parse_args.py                   |   48 +
 pcs/cli/constraint/test/__init__.py                |    0
 pcs/cli/constraint/test/test_command.py            |   81 +
 pcs/cli/constraint/test/test_console_report.py     |   75 +
 pcs/cli/constraint/test/test_parse_args.py         |   82 +
 pcs/cli/constraint_all/__init__.py                 |    0
 pcs/cli/constraint_all/console_report.py           |   55 +
 pcs/cli/constraint_all/test/__init__.py            |    0
 pcs/cli/constraint_all/test/test_console_report.py |   79 +
 pcs/cli/constraint_colocation/__init__.py          |    0
 pcs/cli/constraint_colocation/command.py           |   39 +
 pcs/cli/constraint_colocation/console_report.py    |   28 +
 pcs/cli/constraint_order/__init__.py               |    0
 pcs/cli/constraint_order/command.py                |   39 +
 pcs/cli/constraint_order/console_report.py         |   53 +
 pcs/cli/constraint_ticket/__init__.py              |    0
 pcs/cli/constraint_ticket/command.py               |   67 +
 pcs/cli/constraint_ticket/console_report.py        |   26 +
 pcs/cli/constraint_ticket/parse_args.py            |   44 +
 pcs/cli/constraint_ticket/test/__init__.py         |    0
 pcs/cli/constraint_ticket/test/test_command.py     |   67 +
 .../constraint_ticket/test/test_console_report.py  |   23 +
 pcs/cli/constraint_ticket/test/test_parse_args.py  |   86 +
 pcs/cluster.py                                     |  506 ++---
 pcs/common/__init__.py                             |    0
 pcs/common/report_codes.py                         |   82 +
 pcs/common/tools.py                                |   21 +
 pcs/config.py                                      |   57 +-
 pcs/constraint.py                                  |  461 +----
 pcs/error_codes.py                                 |   23 -
 pcs/lib/__init__.py                                |    0
 pcs/lib/cib/__init__.py                            |    0
 pcs/lib/cib/acl.py                                 |  104 +
 pcs/lib/cib/constraint/__init__.py                 |    0
 pcs/lib/cib/constraint/colocation.py               |   40 +
 pcs/lib/cib/constraint/constraint.py               |  139 ++
 pcs/lib/cib/constraint/order.py                    |   53 +
 pcs/lib/cib/constraint/resource_set.py             |   74 +
 pcs/lib/cib/constraint/ticket.py                   |  117 ++
 pcs/lib/cib/resource.py                            |   15 +
 pcs/lib/cib/test/__init__.py                       |    0
 pcs/lib/cib/test/test_constraint.py                |  292 +++
 pcs/lib/cib/test/test_constraint_colocation.py     |   99 +
 pcs/lib/cib/test/test_constraint_order.py          |  103 +
 pcs/lib/cib/test/test_constraint_ticket.py         |  295 +++
 pcs/lib/cib/test/test_resource_set.py              |  110 ++
 pcs/lib/cib/tools.py                               |   89 +
 pcs/lib/commands/__init__.py                       |    0
 pcs/lib/commands/constraint/__init__.py            |    0
 pcs/lib/commands/constraint/colocation.py          |   25 +
 pcs/lib/commands/constraint/common.py              |   88 +
 pcs/lib/commands/constraint/order.py               |   24 +
 pcs/lib/commands/constraint/ticket.py              |   70 +
 pcs/lib/commands/quorum.py                         |  107 +
 pcs/lib/commands/test/__init__.py                  |    0
 pcs/lib/commands/test/test_constraint_common.py    |  193 ++
 pcs/lib/commands/test/test_ticket.py               |   76 +
 pcs/lib/corosync/__init__.py                       |    0
 pcs/lib/corosync/config_facade.py                  |  501 +++++
 .../corosync/config_parser.py}                     |   31 +-
 pcs/lib/corosync/live.py                           |   49 +
 pcs/lib/env.py                                     |  168 ++
 pcs/{ => lib}/errors.py                            |   25 +-
 pcs/lib/exchange_formats.md                        |   34 +
 pcs/lib/external.py                                |  345 ++++
 pcs/lib/node.py                                    |   40 +
 pcs/lib/nodes_task.py                              |  117 ++
 pcs/lib/pacemaker.py                               |  215 +++
 pcs/lib/pacemaker_state.py                         |  153 ++
 pcs/lib/pacemaker_values.py                        |  100 +
 pcs/lib/reports.py                                 |  916 +++++++++
 pcs/lib/resource_agent.py                          |  427 ++++
 pcs/lib/test/__init__.py                           |    0
 pcs/lib/test/test_pacemaker_values.py              |  252 +++
 pcs/library_acl.py                                 |  135 --
 pcs/node.py                                        |   67 +-
 pcs/pcs                                            |   13 +-
 pcs/pcs.8                                          |   84 +-
 pcs/pcsd.py                                        |   19 +-
 pcs/prop.py                                        |   17 +-
 pcs/quorum.py                                      |  163 ++
 pcs/resource.py                                    |  483 ++---
 pcs/rule.py                                        |   12 +-
 pcs/settings.py                                    |   24 +-
 pcs/settings.py.debian                             |   19 +-
 pcs/{settings.py => settings_default.py}           |    8 +-
 pcs/status.py                                      |  105 +-
 pcs/stonith.py                                     |  218 +--
 pcs/test/Makefile                                  |   16 -
 pcs/test/__init__.py                               |    0
 pcs/test/a.xml                                     |   13 -
 pcs/test/blank.xml                                 |   13 -
 pcs/test/final.xml                                 |  117 --
 pcs/test/library_test_tools.py                     |   93 -
 pcs/test/pcs_test_assertions.py                    |   75 -
 pcs/test/pcs_test_functions.py                     |   84 -
 pcs/test/{ => resources}/.gitignore                |    1 -
 .../{empty-1.2.xml => resources/cib-empty-1.2.xml} |    0
 .../cib-empty-withnodes.xml}                       |    0
 pcs/test/{empty.xml => resources/cib-empty.xml}    |    0
 pcs/test/{large.xml => resources/cib-large.xml}    |    0
 .../{largefile.xml => resources/cib-largefile.xml} |    0
 pcs/test/{ => resources}/cluster.conf              |    0
 .../corosync-3nodes-qdevice.conf}                  |   13 +
 .../corosync-3nodes.conf}                          |    5 +
 pcs/test/{ => resources}/corosync.conf             |    1 +
 pcs/test/resources/crm_mon.minimal.xml             |   10 +
 pcs/test/{ => resources}/transitions01.xml         |    0
 pcs/test/{ => resources}/transitions02.xml         |    0
 pcs/test/suite.py                                  |   87 +
 pcs/test/test.py                                   |   23 -
 pcs/test/test.sh                                   |   45 -
 pcs/test/test_acl.py                               |   69 +-
 pcs/test/test_cluster.py                           |  575 ++++--
 pcs/test/test_constraints.py                       |  446 +++--
 .../{test_library_acl.py => test_lib_cib_acl.py}   |  128 +-
 pcs/test/test_lib_cib_tools.py                     |  147 ++
 pcs/test/test_lib_commands_quorum.py               |  671 +++++++
 pcs/test/test_lib_corosync_config_facade.py        | 2042 ++++++++++++++++++++
 ..._conf.py => test_lib_corosync_config_parser.py} |  175 +-
 pcs/test/test_lib_corosync_live.py                 |  101 +
 pcs/test/test_lib_env.py                           |  354 ++++
 pcs/test/test_lib_external.py                      |  860 +++++++++
 pcs/test/test_lib_node.py                          |   82 +
 pcs/test/test_lib_nodes_task.py                    |  452 +++++
 pcs/test/test_lib_pacemaker.py                     |  925 +++++++++
 pcs/test/test_lib_pacemaker_state.py               |  154 ++
 pcs/test/test_lib_resource_agent.py                |  986 ++++++++++
 pcs/test/test_node.py                              |   65 +-
 pcs/test/test_properties.py                        |   33 +-
 pcs/test/test_quorum.py                            |  388 ++++
 pcs/test/test_resource.py                          |  285 ++-
 pcs/test/test_rule.py                              |   52 +-
 pcs/test/test_stonith.py                           |   76 +-
 pcs/test/test_utils.py                             |  282 ++-
 pcs/test/tools/__init__.py                         |    0
 pcs/test/tools/assertions.py                       |  188 ++
 pcs/test/tools/color_text_runner.py                |  112 ++
 pcs/test/tools/custom_mock.py                      |   24 +
 pcs/test/tools/misc.py                             |   52 +
 pcs/test/tools/pcs_mock.py                         |   13 +
 pcs/test/tools/pcs_runner.py                       |   78 +
 pcs/test/tools/xml.py                              |   51 +
 pcs/usage.py                                       |  242 ++-
 pcs/utils.py                                       |  784 ++++----
 pcsd/.gitignore                                    |    2 -
 pcsd/Gemfile                                       |    2 -
 pcsd/Gemfile.lock                                  |   19 +-
 pcsd/Makefile                                      |   16 +-
 pcsd/bootstrap.rb                                  |    2 +-
 pcsd/cfgsync.rb                                    |   47 +-
 pcsd/config.rb                                     |   66 +-
 pcsd/fenceagent.rb                                 |   82 +-
 pcsd/pcs.rb                                        |   14 +-
 pcsd/pcsd.conf                                     |   18 +-
 pcsd/pcsd.rb                                       |  220 ++-
 pcsd/public/css/style.css                          |    4 +
 pcsd/public/js/nodes-ember.js                      |  215 ++-
 pcsd/public/js/pcsd.js                             |  144 +-
 pcsd/remote.rb                                     |  180 +-
 pcsd/resource.rb                                   |   77 -
 pcsd/session.rb                                    |    8 +-
 pcsd/ssl.rb                                        |   79 +-
 pcsd/test/test_cfgsync.rb                          |   22 +-
 pcsd/test/test_config.rb                           |  188 +-
 pcsd/views/_configure.erb                          |   46 +-
 pcsd/views/_resource.erb                           |   22 +-
 pcsd/views/fenceagentform.erb                      |   78 -
 pcsd/views/main.erb                                |  243 ++-
 pcsd/views/resourceagentform.erb                   |  104 -
 setup.py                                           |   32 +-
 198 files changed, 19675 insertions(+), 3865 deletions(-)

diff --git a/.gitignore b/.gitignore
index 950d231..99de27e 100644
--- a/.gitignore
+++ b/.gitignore
@@ -4,5 +4,8 @@
 /dist/
 /pcs/bash_completion.d.pcs
 /pcsd/pcs_settings.conf
+/pcsd/pcs_settings.conf.*
 /pcsd/pcs_users.conf
+/pcsd/.bundle
+/pcsd/vendor
 
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 0000000..661f3d2
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,102 @@
+[MESSAGES CONTROL]
+#useful links:
+#http://pylint-messages.wikidot.com/all-codes
+#https://docs.pylint.org/features.html
+#
+#can be used for example with
+#pylint --reports=n ./**/*.py --msg-template='{path}:{line}: {msg_id}: [{symbol}] {msg}'
+#
+#project adapted to pylint 1.5.4, astroid 1.4.4
+#
+# (C) convention, for programming standard violation
+# (R) refactor, for bad code smell
+# (W) warning, for python specific problems
+# (E) error, for much probably bugs in the code
+# (F) fatal, if an error occurred which prevented pylint from doing further processing.
+
+#C0103: [invalid-name] Invalid %s name "%s"
+#C0111: [missing-docstring] Missing %s docstring
+#C0121: [singleton-comparison] Comparison to None should be 'expr is None'
+#C0122: [misplaced-comparison-constant] Comparison should be code == 3
+#C0200: [consider-using-enumerate] Consider using enumerate instead of iterating with range and len
+#C0325: [superfluous-parens] Unnecessary parens after %r keyword
+#C0326: [bad-whitespace] %s space %s %s %s\n%s
+#C0330: [bad-continuation] Wrong continued indentation - https://github.com/PyCQA/pylint/issues/289
+#C0410: [multiple-imports] Multiple imports on one line
+#C0411: [wrong-import-order] %s comes before %s Used when PEP8 import order is not respected (standard imports first, then third-party libraries, then local imports)
+#C0412: [ungrouped-imports] Imports from package subprocess are not grouped - is emitted when imports from the same package or module are not placed together, but scattered around in the code
+#C0413: [wrong-import-position] Import "from errors import error_codes" should be placed at the top of the module
+#E0202: [method-hidden] An attribute affected in %s line %s hide this method
+#E0611: [no-name-in-module] No name %r in module %r
+#E1101: [no-member] %s %r has no %r member
+#E1102: [not-callable] %s is not callable
+#E1103: [maybe-no-member] %s %r has no %r member (but some types could not be inferred)
+#E1136: [unsubscriptable-object] Value 'self.allowed_child_ids' is unsubscriptable
+#F0401: [import-error] Unable to import %s
+#R0102: [simplifiable-if-statement] The if statement can be reduced by assigning bool of test
+#R0201: [no-self-use] Method could be a function
+#R0204: [redefined-variable-type] Redefinition of key_modulus type from unicode to str
+#R0903: [too-few-public-methods] Too few public methods (%s/%s)
+#R0911: [too-many-return-statements] Too many return statements (%s/%s)
+#R0912: [too-many-branches] Too many branches (%s/%s)
+#R0913: [too-many-arguments] Too many arguments (%s/%s)
+#W0102: [dangerous-default-value] Dangerous default value %s as argument
+#W0141: [bad-builtin] Used builtin function %r
+#W0142: [star-args] Used * or ** magic
+#W0150: [lost-exception] %s statement in finally block may swallow exception
+#W0212: [protected-access] Access to a protected member %s of a client class
+#W0232: [no-init] Class has no __init__ method
+#W0401: [wildcard-import] Wildcard import %s
+#W0403: [relative-import] Relative import %r, should be %r
+#W0511: [fixme] Used when a warning note as FIXME or XXX is detected.
+#W0603: [global-statement] Using the global statement
+#W0612: [locally-disabled] Unused variable %r
+#W0613: [unused-argument] Unused argument %r
+#W0614: [unused-wildcard-import] Unused import %s from wildcard import
+#W0621: [redefined-outer-name] Redefining name %r from outer scope (line %s)
+#W0622: [redefined-builtin] Redefining built-in %r
+#W0631: [undefined-loop-variable] Using possibly undefined loop variable %r
+#W0632: [unbalanced-tuple-unpacking] Possible unbalanced tuple unpacking with sequence%s: …
+#W0702: [bare-except] No exception type(s) specified
+#W0703: [broad-except] Catching too general exception %s
+#W0710: [nonstandard-exception] Exception doesn't inherit from standard "Exception" class
+#W1401: [anomalous-backslash-in-string] Anomalous backslash in string: \'%s\'. String constant might be missing an r prefix.
+disable=no-name-in-module, import-error, nonstandard-exception, unused-argument, redefined-outer-name, bare-except,  anomalous-backslash-in-string, no-member, star-args, undefined-loop-variable, maybe-no-member, broad-except, too-few-public-methods, not-callable, protected-access, method-hidden, too-many-arguments, global-statement, unbalanced-tuple-unpacking, fixme, lost-exception, dangerous-default-value, too-many-return-statements, no-self-use, no-init, redefined-builtin, wildcard-imp [...]
+
+[DESIGN]
+# Maximum number of locals for function / method body
+max-locals=48
+# Maximum number of statements in function / method body
+max-statements=211
+# Maximum number of branch for function / method body
+max-branches=68
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=113
+# Maximum number of boolean expressions in a if statement
+max-bool-expr=6
+
+
+[LOGGING]
+# Ignore imports when computing similarities.
+# unfortunately don't work with from abc import (...similar lines...
+ignore-imports=yes
+# Minimum lines number of a similarity.
+min-similarity-lines=9
+
+
+[VARIABLES]
+# A regular expression matching the name of dummy variables (i.e. expectedly
+# not used).
+dummy-variables-rgx=_$|dummy
+
+
+[FORMAT]
+# Maximum number of lines in a module
+max-module-lines=4571
+# Maximum number of characters on a single line.
+max-line-length=1291
+
+[ELIF]
+
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=7
diff --git a/MANIFEST.in b/MANIFEST.in
index 6ac4a25..e232624 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,7 +1,7 @@
 include Makefile
 include COPYING
 include pcs/pcs.8
-include pcs/bash_completion.d.pcs
+include pcs/bash_completion.sh
 include pcsd/.bundle/config
 graft pcsd
 graft pcsd/vendor/cache
diff --git a/Makefile b/Makefile
index a3d5687..de216ce 100644
--- a/Makefile
+++ b/Makefile
@@ -4,6 +4,10 @@ DISTRO_DEBIAN := $(shell if [ -e /etc/debian_version ] ; then echo true; else ec
 IS_DEBIAN=false
 DISTRO_DEBIAN_VER_8=false
 
+ifndef PYTHON
+	PYTHON=python
+endif
+
 ifeq ($(UNAME_OS_GNU),true)
   ifeq ($(DISTRO_DEBIAN),true)
     IS_DEBIAN=true
@@ -16,7 +20,7 @@ ifeq ($(UNAME_OS_GNU),true)
 endif
 
 ifndef PYTHON_SITELIB
-  PYTHON_SITELIB=$(shell python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")
+  PYTHON_SITELIB=$(shell $(PYTHON) -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")
 endif
 ifeq ($(PYTHON_SITELIB), /usr/lib/python2.6/dist-packages)
   EXTRA_SETUP_OPTS="--install-layout=deb"
@@ -53,7 +57,7 @@ endif
 MANDIR=/usr/share/man
 
 ifndef PREFIX
-  PREFIX=$(shell prefix=`python -c "import sys; print(sys.prefix)"` || prefix="/usr"; echo $$prefix)
+  PREFIX=$(shell prefix=`$(PYTHON) -c "import sys; print(sys.prefix)"` || prefix="/usr"; echo $$prefix)
 endif
 
 ifndef systemddir
@@ -72,12 +76,11 @@ ifndef install_settings
   endif
 endif
 
-install: bash_completion
-	python setup.py install --prefix ${DESTDIR}${PREFIX} ${EXTRA_SETUP_OPTS}
+install:
+	$(PYTHON) setup.py install --root=$(or ${DESTDIR}, /) ${EXTRA_SETUP_OPTS}
 	mkdir -p ${DESTDIR}${PREFIX}/sbin/
-	chmod 755 ${DESTDIR}${PYTHON_SITELIB}/pcs/pcs.py
-	ln -fs ${PYTHON_SITELIB}/pcs/pcs.py ${DESTDIR}${PREFIX}/sbin/pcs
-	install -D pcs/bash_completion.d.pcs ${DESTDIR}/etc/bash_completion.d/pcs
+	mv ${DESTDIR}${PREFIX}/bin/pcs ${DESTDIR}${PREFIX}/sbin/pcs
+	install -D pcs/bash_completion.sh ${DESTDIR}/etc/bash_completion.d/pcs
 	install -m644 -D pcs/pcs.8 ${DESTDIR}/${MANDIR}/man8/pcs.8
 ifeq ($(IS_DEBIAN),true)
   ifeq ($(install_settings),true)
@@ -86,7 +89,7 @@ ifeq ($(IS_DEBIAN),true)
 	        sed s/DEB_HOST_MULTIARCH/${DEB_HOST_MULTIARCH}/g pcs/settings.py.debian > $$tmp_settings; \
 	        install -m644 $$tmp_settings ${DESTDIR}${PYTHON_SITELIB}/pcs/settings.py; \
 	        rm -f $$tmp_settings
-	python -m compileall -fl ${DESTDIR}${PYTHON_SITELIB}/pcs/settings.py
+	$(PYTHON) -m compileall -fl ${DESTDIR}${PYTHON_SITELIB}/pcs/settings.py
   endif
 endif
 
@@ -146,12 +149,9 @@ endif
 	rm -f ${DESTDIR}/etc/pam.d/pcsd
 	rm -rf ${DESTDIR}/var/lib/pcsd
 
-tarball: bash_completion
-	python setup.py sdist --formats=tar
-	python maketarballs.py
+tarball:
+	$(PYTHON) setup.py sdist --formats=tar
+	$(PYTHON) maketarballs.py
 
 newversion:
-	python newversion.py
-
-bash_completion:
-	cd pcs ; python -c 'import usage;  usage.sub_generate_bash_completion()' > bash_completion.d.pcs ; cd ..
+	$(PYTHON) newversion.py
diff --git a/newversion.py b/newversion.py
index 09fb547..fed9a5e 100644
--- a/newversion.py
+++ b/newversion.py
@@ -25,8 +25,7 @@ pcs_version_split[2] = str(int(pcs_version_split[2]) + 1)
 new_version = ".".join(pcs_version_split)
 
 print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' setup.py"))
-print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings.py"))
-print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings.py.debian"))
+print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings_default.py"))
 print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcsd/bootstrap.rb"))
 
 manpage_head = '.TH PCS "8" "{date}" "pcs {version}" "System Administration Utilities"'.format(
@@ -39,8 +38,8 @@ print(os.system("git diff"))
 print("Look good? (y/n)")
 choice = sys.stdin.read(1)
 if choice != "y":
-  print("Ok, exiting")
-  sys.exit(0)
+    print("Ok, exiting")
+    sys.exit(0)
 
 print(os.system("git commit -a -m 'Bumped to "+new_version+"'"))
 print(os.system("git tag "+new_version))
diff --git a/pcs/acl.py b/pcs/acl.py
index e347c27..118ceed 100644
--- a/pcs/acl.py
+++ b/pcs/acl.py
@@ -1,25 +1,26 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import sys
 
-import usage
-import utils
-import prop
-from errors import CmdLineInputError
-from errors import ReportItem
-from errors import ReportItemSeverity
-from errors import error_codes
-from library_acl import LibraryError
-from library_acl import create_role
-from library_acl import provide_role
-from library_acl import add_permissions_to_role
-
-def exit_on_cmdline_input_errror(usage_name):
-    usage.acl([usage_name])
-    sys.exit(1)
+from pcs import (
+    prop,
+    usage,
+    utils,
+)
+from pcs.lib.pacemaker import get_cib_xml, get_cib, replace_cib_configuration
+from pcs.lib.pacemaker_values import is_true
+from pcs.lib.cib.acl import (
+    add_permissions_to_role,
+    create_role,
+    provide_role,
+)
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.lib.errors import LibraryError
 
 def acl_cmd(argv):
     if len(argv) == 0:
@@ -56,7 +57,7 @@ def acl_show(argv):
 
     properties = prop.get_set_properties(defaults=prop.get_default_properties())
     acl_enabled = properties.get("enable-acl", "").lower()
-    if utils.is_cib_true(acl_enabled):
+    if is_true(acl_enabled):
         print("ACLs are enabled")
     else:
         print("ACLs are disabled, run 'pcs acl enable' to enable")
@@ -82,7 +83,7 @@ def acl_role(argv):
         try:
             run_create_role(argv)
         except CmdLineInputError as e:
-            exit_on_cmdline_input_errror('role create')
+            utils.exit_on_cmdline_input_errror(e, 'acl', 'role create')
         except LibraryError as e:
             utils.process_library_reports(e.args)
 
@@ -171,7 +172,7 @@ def acl_permission(argv):
         try:
             run_permission_add(argv)
         except CmdLineInputError as e:
-            exit_on_cmdline_input_errror('permission add')
+            utils.exit_on_cmdline_input_errror(e, 'acl', 'permission add')
         except LibraryError as e:
             utils.process_library_reports(e.args)
 
@@ -216,13 +217,16 @@ def argv_to_permission_info_list(argv):
     if len(argv) % 3 != 0:
         raise CmdLineInputError()
 
-    permission_info_list = zip(
+    #wrapping by list,
+    #because in python3 zip() returns an iterator instead of a list
+    #and the loop below makes iteration over it
+    permission_info_list = list(zip(
         [permission.lower() for permission in argv[::3]],
         [scope_type.lower() for scope_type in argv[1::3]],
         argv[2::3]
-    )
+    ))
 
-    for permission, scope_type, scope in permission_info_list:
+    for permission, scope_type, dummy_scope in permission_info_list:
         if(
             permission not in ['read', 'write', 'deny']
             or
@@ -242,10 +246,10 @@ def run_create_role(argv):
         description = argv.pop(0)[len(desc_key):]
     permission_info_list = argv_to_permission_info_list(argv)
 
-    dom = utils.get_cib_dom()
-    create_role(dom, role_id, description)
-    add_permissions_to_role(dom, role_id, permission_info_list)
-    utils.replace_cib_configuration(dom)
+    cib = get_cib(get_cib_xml(utils.cmd_runner()))
+    create_role(cib, role_id, description)
+    add_permissions_to_role(cib, role_id, permission_info_list)
+    replace_cib_configuration(utils.cmd_runner(), cib)
 
 def run_role_delete(argv):
     if len(argv) < 1:
@@ -360,10 +364,10 @@ def run_permission_add(argv):
     role_id = argv.pop(0)
     permission_info_list = argv_to_permission_info_list(argv)
 
-    dom = utils.get_cib_dom()
-    provide_role(dom, role_id)
-    add_permissions_to_role(dom, role_id, permission_info_list)
-    utils.replace_cib_configuration(dom)
+    cib = get_cib(get_cib_xml(utils.cmd_runner()))
+    provide_role(cib, role_id)
+    add_permissions_to_role(cib, role_id, permission_info_list)
+    replace_cib_configuration(utils.cmd_runner(), cib)
 
 def run_permission_delete(argv):
     dom = utils.get_cib_dom()
diff --git a/pcs/pcs.py b/pcs/app.py
old mode 100755
new mode 100644
similarity index 87%
rename from pcs/pcs.py
rename to pcs/app.py
index bda6c0b..d8255f8
--- a/pcs/pcs.py
+++ b/pcs/app.py
@@ -1,32 +1,47 @@
-#!/usr/bin/python
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import sys
-import os
 import getopt
+import os
+import sys
+import logging
+logging.basicConfig()
 
-import usage
-import cluster
-import resource
-import stonith
-import prop
-import constraint
-import acl
-import utils
-import status
-import settings
-import config
-import pcsd
-import node
+from pcs import (
+    acl,
+    cluster,
+    config,
+    constraint,
+    node,
+    pcsd,
+    prop,
+    quorum,
+    resource,
+    settings,
+    status,
+    stonith,
+    usage,
+    utils,
+)
+
+from pcs.cli.common import completion
 
 
 usefile = False
 filename = ""
-def main(argv):
+def main(argv=None):
+    if completion.has_applicable_environment(os.environ):
+        print(completion.make_suggestions(
+            os.environ,
+            usage.generate_completion_tree_from_usage()
+        ))
+        sys.exit()
+
+    argv = argv if argv else sys.argv[1:]
     utils.subprocess_setup()
     global filename, usefile
     orig_argv = argv[:]
@@ -63,15 +78,9 @@ def main(argv):
         # p = password (cluster auth), u = user (cluster auth),
         # V = verbose (cluster verify)
         pcs_short_options = "hf:p:u:V"
-        pcs_short_options_with_args = []
-        for c in pcs_short_options:
-            if c == ":":
-                pcs_short_options_with_args.append(prev_char)
-            prev_char = c
-
         pcs_long_options = [
             "debug", "version", "help", "fullhelp",
-            "force", "autocorrect", "interactive", "autodelete",
+            "force", "skip-offline", "autocorrect", "interactive", "autodelete",
             "all", "full", "groups", "local", "wait", "config",
             "start", "enable", "disabled", "off",
             "pacemaker", "corosync",
@@ -146,6 +155,12 @@ def main(argv):
         usage.main()
         sys.exit(1)
 
+    # create a dummy logger
+    # we do not have a log file for cli (yet), but library requires a logger
+    logger = logging.getLogger("old_cli")
+    logger.propagate = 0
+    logger.handlers = []
+
     command = argv.pop(0)
     if (command == "-h" or command == "help"):
         usage.main()
@@ -161,6 +176,11 @@ def main(argv):
         "config": config.config_cmd,
         "pcsd": pcsd.pcsd_cmd,
         "node": node.node_cmd,
+        "quorum": lambda argv: quorum.quorum_cmd(
+            utils.get_library_wrapper(),
+            argv,
+            utils.get_modificators()
+        ),
     }
     if command not in cmd_map:
         usage.main()
@@ -226,6 +246,3 @@ def main(argv):
             sys.exit(exitcode)
             return
     cmd_map[command](argv)
-
-if __name__ == "__main__":
-  main(sys.argv[1:])
diff --git a/pcs/bash_completion.sh b/pcs/bash_completion.sh
new file mode 100644
index 0000000..14aff29
--- /dev/null
+++ b/pcs/bash_completion.sh
@@ -0,0 +1,38 @@
+# bash completion for pcs
+_pcs_completion(){
+  
+  LENGTHS=()
+  for WORD in "${COMP_WORDS[@]}"; do
+    LENGTHS+=(${#WORD})
+  done
+
+
+  COMPREPLY=( $( \
+    env COMP_WORDS="${COMP_WORDS[*]}" \
+    COMP_LENGTHS="${LENGTHS[*]}" \
+    COMP_CWORD=$COMP_CWORD \
+    PCS_AUTO_COMPLETE=1 pcs \
+  ) )
+
+  #examples what we get:
+  #pcs
+  #COMP_WORDS: pcs COMP_LENGTHS: 3
+  #pcs co
+  #COMP_WORDS: pcs co COMP_LENGTHS: 3 2
+  #      pcs          config        
+  #COMP_WORDS: pcs config COMP_LENGTHS: 3 6
+  #      pcs          config       "  
+  #COMP_WORDS: pcs config "    COMP_LENGTHS: 3 6 4
+  #      pcs          config       "'\\n
+  #COMP_WORDS: pcs config "'\\n COMP_LENGTHS: 3 6 5'"
+}
+
+# -o default
+#   Use readline's default filename completion if the compspec generates no
+#   matches.
+# -F function
+#   The shell function function is executed in the current shell environment. 
+#   When it finishes, the possible completions are retrieved from the value of
+#   the COMPREPLY array variable.
+
+complete -o default -F _pcs_completion pcs
diff --git a/pcs/cli/__init__.py b/pcs/cli/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/common/__init__.py b/pcs/cli/common/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/common/completion.py b/pcs/cli/common/completion.py
new file mode 100644
index 0000000..0b4cee8
--- /dev/null
+++ b/pcs/cli/common/completion.py
@@ -0,0 +1,101 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+def has_applicable_environment(environment):
+    """
+    dict environment - very likely os.environ
+    """
+    return bool(
+        all(
+            key in environment
+            for key in
+            ["COMP_WORDS", "COMP_LENGTHS", "COMP_CWORD", "PCS_AUTO_COMPLETE"]
+        )
+        and
+        environment['PCS_AUTO_COMPLETE'].strip() not in ('0', '')
+        and
+        environment['COMP_CWORD'].isdigit()
+    )
+
+def make_suggestions(environment, suggestion_tree):
+    """
+    dict environment - very likely os.environ
+    dict suggestion_tree - {'acl': {'role': {'create': ...}}}...
+    """
+    if not has_applicable_environment(environment):
+        raise EnvironmentError("Environment is not completion read")
+
+    try:
+        typed_word_list = _split_words(
+            environment["COMP_WORDS"],
+            environment["COMP_LENGTHS"].split(" "),
+        )
+    except EnvironmentError:
+        return ""
+
+    return "\n".join(_find_suggestions(
+        suggestion_tree,
+        typed_word_list,
+        int(environment['COMP_CWORD'])
+    ))
+
+def _split_words(joined_words, word_lengths):
+    cursor_position = 0
+    words_string_len = len(joined_words)
+    word_list = []
+    for length in word_lengths:
+        if not length.isdigit():
+            raise EnvironmentError(
+                "Length of word '{0}' is not digit".format(length)
+            )
+        next_position = cursor_position + int(length)
+        if next_position > words_string_len:
+            raise EnvironmentError(
+                "Expected lengths are bigger than word lengths"
+            )
+        if(
+            next_position != words_string_len
+            and
+            not joined_words[next_position].isspace()
+        ):
+            raise EnvironmentError("Words separator is not expected space")
+
+        word_list.append(joined_words[cursor_position:next_position])
+        cursor_position = next_position + 1
+
+    if words_string_len > next_position:
+        raise EnvironmentError("Expected lengths are smaller then word lengths")
+
+    return word_list
+
+def _find_suggestions(suggestion_tree, typed_word_list, word_under_cursor_idx):
+    if not  1 <= word_under_cursor_idx <= len(typed_word_list):
+        return []
+
+    if len(typed_word_list) == word_under_cursor_idx:
+        #the user has not started typing the last word yet
+        word_under_cursor = ''
+    else:
+        word_under_cursor = typed_word_list[word_under_cursor_idx]
+
+    words_for_current_cursor_position = _get_subcommands(
+        suggestion_tree,
+        typed_word_list[1:word_under_cursor_idx]
+    )
+
+    return [
+        word for word in words_for_current_cursor_position
+        if word.startswith(word_under_cursor)
+    ]
+
+def _get_subcommands(suggestion_tree, previous_subcommand_list):
+    subcommand_tree = suggestion_tree
+    for subcommand in previous_subcommand_list:
+        if subcommand not in subcommand_tree:
+            return []
+        subcommand_tree = subcommand_tree[subcommand]
+    return sorted(list(subcommand_tree.keys()))
diff --git a/pcs/cli/common/console_report.py b/pcs/cli/common/console_report.py
new file mode 100644
index 0000000..3d42798
--- /dev/null
+++ b/pcs/cli/common/console_report.py
@@ -0,0 +1,25 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import sys
+
+
+def error(message, exit=True):
+    sys.stderr.write("Error: {0}\n".format(message))
+    if exit:
+        sys.exit(1)
+
+def indent(line_list, indent_step=2):
+    """
+    return line list where each line of input is prefixed by N spaces
+    list of string line_list are original lines
+    int indent_step is count of spaces for line prefix
+    """
+    return [
+        "{0}{1}".format(" "*indent_step, line) if line else line
+        for line in line_list
+    ]
diff --git a/pcs/cli/common/env.py b/pcs/cli/common/env.py
new file mode 100644
index 0000000..f407981
--- /dev/null
+++ b/pcs/cli/common/env.py
@@ -0,0 +1,15 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+class Env(object):
+    def __init__(self):
+        self.cib_data = None
+        self.user = None
+        self.groups = None
+        self.corosync_conf_data = None
+        self.auth_tokens_getter = None
+        self.debug = False
diff --git a/pcs/cli/common/errors.py b/pcs/cli/common/errors.py
new file mode 100644
index 0000000..19ca734
--- /dev/null
+++ b/pcs/cli/common/errors.py
@@ -0,0 +1,20 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+class CmdLineInputError(Exception):
+    """
+    Exception expressing that the user entered an incorrect command on the command line.
+    """
+    def __init__(self, message=None):
+        """
+        string message explains what was wrong with the entered command
+        The routine which handles this exception behaves according to whether
+        the message was specified (prints this message to user) or not (prints
+        appropriate part of documentation)
+        """
+        super(CmdLineInputError, self).__init__(message)
+        self.message = message
diff --git a/pcs/cli/common/lib_wrapper.py b/pcs/cli/common/lib_wrapper.py
new file mode 100644
index 0000000..269a9ac
--- /dev/null
+++ b/pcs/cli/common/lib_wrapper.py
@@ -0,0 +1,122 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from collections import namedtuple
+from functools import partial
+import logging
+
+from pcs.cli.common import middleware
+
+#from pcs.lib import commands does not work: "commands" is package
+from pcs.lib.commands.constraint import colocation as constraint_colocation
+from pcs.lib.commands.constraint import order as constraint_order
+from pcs.lib.commands.constraint import ticket as constraint_ticket
+from pcs.lib.commands import quorum
+from pcs.cli.common.reports import (
+    LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
+)
+
+from pcs.lib.env import LibraryEnvironment
+
+_CACHE = {}
+
+def wrapper(dictionary):
+    return namedtuple('wrapper', dictionary.keys())(**dictionary)
+
+def cli_env_to_lib_env(cli_env):
+    return LibraryEnvironment(
+        logging.getLogger("old_cli"),
+        LibraryReportProcessorToConsole(cli_env.debug),
+        cli_env.user,
+        cli_env.groups,
+        cli_env.cib_data,
+        cli_env.corosync_conf_data,
+        cli_env.auth_tokens_getter,
+    )
+
+def bind(cli_env, run_with_middleware, run_library_command):
+    def run(cli_env, *args, **kwargs):
+        lib_env = cli_env_to_lib_env(cli_env)
+
+        lib_call_result = run_library_command(lib_env, *args, **kwargs)
+
+        #middlewares need to finish their work and they see only cli_env
+        #so we need to reflect some changes to cli_env
+        if not lib_env.is_cib_live:
+            cli_env.cib_data = lib_env.get_cib_xml()
+        if not lib_env.is_corosync_conf_live:
+            cli_env.corosync_conf_data = lib_env.get_corosync_conf_data()
+
+        return lib_call_result
+    return partial(run_with_middleware, run, cli_env)
+
+def bind_all(env, run_with_middleware, dictionary):
+    return wrapper(dict(
+        (exposed_fn, bind(env, run_with_middleware, library_fn))
+        for exposed_fn, library_fn in dictionary.items()
+    ))
+
+def get_module(env, middleware_factory, name):
+    if name not in _CACHE:
+        _CACHE[name] = load_module(env, middleware_factory, name)
+    return _CACHE[name]
+
+
+def load_module(env, middleware_factory, name):
+    if name == 'constraint_order':
+        return bind_all(
+            env,
+            middleware.build(middleware_factory.cib),
+            {
+                'set': constraint_order.create_with_set,
+                'show': constraint_order.show,
+            }
+        )
+
+    if name == 'constraint_colocation':
+        return bind_all(
+            env,
+            middleware.build(middleware_factory.cib),
+            {
+                'set': constraint_colocation.create_with_set,
+                'show': constraint_colocation.show,
+            }
+        )
+
+    if name == 'constraint_ticket':
+        return bind_all(
+            env,
+            middleware.build(middleware_factory.cib),
+            {
+                'set': constraint_ticket.create_with_set,
+                'show': constraint_ticket.show,
+                'add': constraint_ticket.create,
+            }
+        )
+
+    if name == "quorum":
+        return bind_all(
+            env,
+            middleware.build(middleware_factory.corosync_conf_existing),
+            {
+                "add_device": quorum.add_device,
+                "get_config": quorum.get_config,
+                "remove_device": quorum.remove_device,
+                "set_options": quorum.set_options,
+                "update_device": quorum.update_device,
+            }
+        )
+
+    raise Exception("No library part '{0}'".format(name))
+
+class Library(object):
+    def __init__(self, env, middleware_factory):
+        self.env = env
+        self.middleware_factory = middleware_factory
+
+    def __getattr__(self, name):
+        return get_module(self.env, self.middleware_factory, name)
diff --git a/pcs/cli/common/middleware.py b/pcs/cli/common/middleware.py
new file mode 100644
index 0000000..16618e1
--- /dev/null
+++ b/pcs/cli/common/middleware.py
@@ -0,0 +1,69 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from collections import namedtuple
+from functools import partial
+
+from pcs.cli.common import console_report
+
+
+def build(*middleware_list):
+    def run(command, env, *args, **kwargs):
+        next_in_line = command
+        for next_command in reversed(middleware_list):
+            next_in_line = partial(next_command, next_in_line)
+
+        return next_in_line(env, *args, **kwargs)
+    return run
+
+def cib(use_local_cib, load_cib_content, write_cib):
+    """
+    return configured middleware that cares about local cib
+    bool use_local_cib is flag if local cib was required
+    callable load_cib_content returns local cib content, takes no params
+    callable write_cib puts the content of the cib to the required place
+    """
+    def apply(next_in_line, env, *args, **kwargs):
+        if use_local_cib:
+            env.cib_data = load_cib_content()
+
+        result_of_next = next_in_line(env, *args, **kwargs)
+
+        if use_local_cib:
+            write_cib(env.cib_data)
+
+        return result_of_next
+    return apply
+
+def corosync_conf_existing(local_file_path):
+    def apply(next_in_line, env, *args, **kwargs):
+        if local_file_path:
+            try:
+                env.corosync_conf_data = open(local_file_path).read()
+            except EnvironmentError as e:
+                console_report.error("Unable to read {0}: {1}".format(
+                    local_file_path,
+                    e.strerror
+                ))
+
+        result_of_next = next_in_line(env, *args, **kwargs)
+
+        if local_file_path:
+            try:
+                f = open(local_file_path, "w")
+                f.write(env.corosync_conf_data)
+                f.close()
+            except EnvironmentError as e:
+                console_report.error("Unable to write {0}: {1}".format(
+                    local_file_path,
+                    e.strerror
+                ))
+        return result_of_next
+    return apply
+
+def create_middleware_factory(**kwargs):
+    return namedtuple('MiddlewareFactory', kwargs.keys())(**kwargs)
diff --git a/pcs/cli/common/parse_args.py b/pcs/cli/common/parse_args.py
new file mode 100644
index 0000000..3b01775
--- /dev/null
+++ b/pcs/cli/common/parse_args.py
@@ -0,0 +1,27 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.cli.common.errors import CmdLineInputError
+
+def split_list(arg_list, separator):
+    """return list of list of arg_list using separator as delimiter"""
+    separator_indexes = [i for i, x in enumerate(arg_list) if x == separator]
+    bounds = zip([0]+[i+1 for i in separator_indexes], separator_indexes+[None])
+    return [arg_list[i:j] for i, j in bounds]
+
+def prepare_options(cmdline_args):
+    """return dictionary of options from comandline key=value args"""
+    options = dict()
+    for arg in cmdline_args:
+        if "=" not in arg:
+            raise CmdLineInputError("missing value of '{0}' option".format(arg))
+        if arg.startswith("="):
+            raise CmdLineInputError("missing key in '{0}' option".format(arg))
+
+        name, value = arg.split("=", 1)
+        options[name] = value
+    return options
diff --git a/pcs/cli/common/reports.py b/pcs/cli/common/reports.py
new file mode 100644
index 0000000..367f9aa
--- /dev/null
+++ b/pcs/cli/common/reports.py
@@ -0,0 +1,80 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import sys
+
+from pcs.cli.constraint_all.console_report import duplicate_constraints_report
+from pcs.common import report_codes as codes
+from pcs.lib.errors import LibraryError, ReportItemSeverity
+
+
+__CODE_BUILDER_MAP = {
+    codes.DUPLICATE_CONSTRAINTS_EXIST: duplicate_constraints_report,
+}
+
+
+class LibraryReportProcessorToConsole(object):
+    def __init__(self, debug=False):
+        self.debug = debug
+
+    def process(self, report_item):
+        self.process_list([report_item])
+
+    def process_list(self, report_item_list):
+        errors = []
+        for report_item in report_item_list:
+            if report_item.severity == ReportItemSeverity.ERROR:
+                errors.append(report_item)
+            elif report_item.severity == ReportItemSeverity.WARNING:
+                print("Warning: " + _build_report_message(report_item))
+            elif self.debug or report_item.severity != ReportItemSeverity.DEBUG:
+                print(report_item.message)
+        if errors:
+            raise LibraryError(*errors)
+
+def _prepare_force_text(report_item):
+    if report_item.forceable == codes.SKIP_OFFLINE_NODES:
+        return ", use --skip-offline to override"
+    return ", use --force to override" if report_item.forceable else ""
+
+def _build_report_message(report_item, force_text=""):
+    get_template = __CODE_BUILDER_MAP.get(
+        report_item.code,
+        lambda report_item: report_item.message + "{force}"
+    )
+
+    return get_template(report_item).format(force=force_text)
+
+def process_library_reports(report_item_list, is_forced=False):
+    """
+    report_item_list list of ReportItem
+    """
+    critical_error = False
+    for report_item in report_item_list:
+        if report_item.severity == ReportItemSeverity.WARNING:
+            print("Warning: " + report_item.message)
+            continue
+
+        if report_item.severity != ReportItemSeverity.ERROR:
+            print(report_item.message)
+            continue
+
+        if report_item.forceable and is_forced:
+            # Let the user know what may be wrong even when --force is used,
+            # as it may be used to override early errors which would
+            # otherwise hide later errors.
+            print("Warning: " + report_item.message)
+            continue
+
+        sys.stderr.write('Error: {0}\n'.format(_build_report_message(
+            report_item,
+            _prepare_force_text(report_item)
+        )))
+        critical_error = True
+
+    if critical_error:
+        sys.exit(1)
diff --git a/pcs/cli/common/test/__init__.py b/pcs/cli/common/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/common/test/test_completion.py b/pcs/cli/common/test/test_completion.py
new file mode 100644
index 0000000..865da2c
--- /dev/null
+++ b/pcs/cli/common/test/test_completion.py
@@ -0,0 +1,166 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.cli.common.completion import (
+    _find_suggestions,
+    has_applicable_environment,
+    make_suggestions,
+    _split_words,
+)
+
+tree = {
+    "resource": {
+        "op": {
+            "add": {},
+            "defaults": {},
+            "remove": {},
+        },
+        "clone": {},
+    },
+    "cluster": {
+        "auth": {},
+        "cib": {},
+    }
+}
+
+class SuggestionTest(TestCase):
+    def test_suggest_nothing_when_cursor_on_first_word(self):
+        self.assertEqual([], _find_suggestions(tree, ['pcs'], 0))
+        self.assertEqual([], _find_suggestions(tree, ['pcs', 'resource'], 0))
+
+    def test_suggest_nothing_when_cursor_possition_out_of_range(self):
+        self.assertEqual([], _find_suggestions(tree, ['pcs', 'resource'], 3))
+
+    def test_suggest_when_last_word_not_started(self):
+        self.assertEqual(
+            ["clone", "op"],
+            _find_suggestions(tree, ['pcs', 'resource'], 2)
+        )
+
+    def test_suggest_when_last_word_started(self):
+        self.assertEqual(
+            ["clone"],
+            _find_suggestions(tree, ['pcs', 'resource', 'c'], 2)
+        )
+
+    def test_suggest_when_cursor_on_word_amid(self):
+        self.assertEqual(
+            ["clone"],
+            _find_suggestions(tree, ['pcs', 'resource', 'c', 'add'], 2)
+        )
+
+    def test_suggest_nothing_when_previously_typed_word_not_match(self):
+        self.assertEqual(
+            [],
+            _find_suggestions(tree, ['pcs', 'invalid', 'c'], 2)
+        )
+
+class HasCompletionEnvironmentTest(TestCase):
+    def test_returns_false_if_environment_inapplicable(self):
+        inapplicable_environments = [
+            {
+                'COMP_CWORD': '1',
+                'PCS_AUTO_COMPLETE': '1',
+            },
+            {
+                'COMP_WORDS': 'pcs resource',
+                'PCS_AUTO_COMPLETE': '1',
+            },
+            {
+                'COMP_WORDS': 'pcs resource',
+                'COMP_CWORD': '1',
+            },
+            {
+                'COMP_WORDS': 'pcs resource',
+                'COMP_CWORD': '1',
+                'PCS_AUTO_COMPLETE': '0',
+            },
+            {
+                'COMP_WORDS': 'pcs resource',
+                'COMP_CWORD': '1a',
+                'PCS_AUTO_COMPLETE': '1',
+            },
+            {
+                'COMP_WORDS': 'pcs resource',
+                'COMP_CWORD': '1',
+                'PCS_AUTO_COMPLETE': '1',
+            },
+        ]
+        for environment in inapplicable_environments:
+            self.assertFalse(
+                has_applicable_environment(environment),
+                'environment evaluated as applicable (should not be): '
+                +repr(environment)
+            )
+
+    def test_returns_true_if_environment_is_set(self):
+        self.assertTrue(has_applicable_environment({
+            "COMP_WORDS": "pcs resource",
+            "COMP_CWORD": '1',
+            "COMP_LENGTHS": "3 8",
+            "PCS_AUTO_COMPLETE": "1",
+        }))
+
+class MakeSuggestionsEnvironment(TestCase):
+    def test_raises_for_incomlete_environment(self):
+        self.assertRaises(
+            EnvironmentError,
+            lambda: make_suggestions(
+                {
+                    'COMP_CWORD': '1',
+                    'PCS_AUTO_COMPLETE': '1',
+                },
+                suggestion_tree=tree
+            )
+        )
+
+    def test_suggest_on_correct_environment(self):
+        self.assertEqual(
+            "clone\nop",
+            make_suggestions(
+                {
+                    "COMP_WORDS": "pcs resource",
+                    "COMP_CWORD": "2",
+                    "COMP_LENGTHS": "3 8",
+                    "PCS_AUTO_COMPLETE": "1",
+                },
+                suggestion_tree=tree
+            )
+        )
+
+class SplitWordsTest(TestCase):
+    def test_return_word_list_on_compatible_words_and_lenght(self):
+        self.assertEqual(
+            ["pcs", "resource", "op", "a"],
+            _split_words("pcs resource op a", ["3", "8", "2", "1"])
+        )
+
+    def test_refuse_when_no_int_in_lengths(self):
+        self.assertRaises(
+            EnvironmentError,
+            lambda: _split_words("pcs resource op a", ["3", "8", "2", "A"])
+        )
+
+    def test_refuse_when_lengths_are_too_big(self):
+        self.assertRaises(
+            EnvironmentError,
+            lambda: _split_words("pcs resource op a", ["3", "8", "2", "10"])
+        )
+
+    def test_refuse_when_separator_doesnot_match(self):
+        self.assertRaises(
+            EnvironmentError,
+            lambda: _split_words("pc sresource op a", ["3", "8", "2", "1"])
+        )
+
+    def test_refuse_when_lengths_are_too_small(self):
+        self.assertRaises(
+            EnvironmentError,
+            lambda: _split_words("pcs resource op a ", ["3", "8", "2", "1"])
+        )
diff --git a/pcs/cli/common/test/test_console_report.py b/pcs/cli/common/test/test_console_report.py
new file mode 100644
index 0000000..23cf8e9
--- /dev/null
+++ b/pcs/cli/common/test/test_console_report.py
@@ -0,0 +1,22 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+from pcs.cli.common.console_report import indent
+
+class IndentTest(TestCase):
+    def test_indent_list_of_lines(self):
+        self.assertEqual(
+            indent([
+                "first",
+                "second"
+            ]),
+            [
+                "  first",
+                "  second"
+            ]
+        )
diff --git a/pcs/cli/common/test/test_lib_wrapper.py b/pcs/cli/common/test/test_lib_wrapper.py
new file mode 100644
index 0000000..f34d2d0
--- /dev/null
+++ b/pcs/cli/common/test/test_lib_wrapper.py
@@ -0,0 +1,35 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+from unittest import TestCase
+
+from pcs.cli.common.lib_wrapper import Library
+from pcs.test.tools.pcs_mock import mock
+
+class LibraryWrapperTest(TestCase):
+    def test_raises_for_bad_path(self):
+        mock_middleware_factory = mock.MagicMock()
+        lib = Library('env', mock_middleware_factory)
+        self.assertRaises(Exception, lambda:lib.no_valid_library_part)
+
+    @mock.patch('pcs.cli.common.lib_wrapper.constraint_order.create_with_set')
+    @mock.patch('pcs.cli.common.lib_wrapper.cli_env_to_lib_env')
+    def test_bind_to_library(self, mock_cli_env_to_lib_env, mock_order_set):
+        lib_env = mock.MagicMock()
+        lib_env.is_cib_live = True
+        lib_env.is_corosync_conf_live = True
+        mock_cli_env_to_lib_env.return_value = lib_env
+
+        def dummy_middleware(next_in_line, env, *args, **kwargs):
+            return next_in_line(env, *args, **kwargs)
+
+
+        mock_middleware_factory = mock.MagicMock()
+        mock_middleware_factory.cib = dummy_middleware
+        mock_middleware_factory.corosync_conf_existing = dummy_middleware
+        Library('env', mock_middleware_factory).constraint_order.set('first', second="third")
+
+        mock_order_set.assert_called_once_with(lib_env, "first", second="third")
diff --git a/pcs/cli/common/test/test_middleware.py b/pcs/cli/common/test/test_middleware.py
new file mode 100644
index 0000000..6179882
--- /dev/null
+++ b/pcs/cli/common/test/test_middleware.py
@@ -0,0 +1,40 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+import pcs.cli.common.middleware
+
+
+class MiddlewareBuildTest(TestCase):
+    def test_run_middleware_correctly_chained(self):
+        log = []
+        def command(lib, argv, modificators):
+            log.append('command: {0}, {1}, {2}'.format(lib, argv, modificators))
+
+        def m1(next, lib, argv, modificators):
+            log.append(
+                'm1 start: {0}, {1}, {2}'.format(lib, argv, modificators)
+            )
+            next(lib, argv, modificators)
+            log.append('m1 done')
+
+        def m2(next, lib, argv, modificators):
+            log.append(
+                'm2 start: {0}, {1}, {2}'.format(lib, argv, modificators)
+            )
+            next(lib, argv, modificators)
+            log.append('m2 done')
+
+        run_with_middleware = pcs.cli.common.middleware.build(m1, m2)
+        run_with_middleware(command, "1", "2", "3")
+        self.assertEqual(log, [
+            'm1 start: 1, 2, 3',
+            'm2 start: 1, 2, 3',
+            'command: 1, 2, 3',
+            'm2 done',
+            'm1 done',
+        ])
diff --git a/pcs/cli/common/test/test_parse_args.py b/pcs/cli/common/test/test_parse_args.py
new file mode 100644
index 0000000..1d6c4b0
--- /dev/null
+++ b/pcs/cli/common/test/test_parse_args.py
@@ -0,0 +1,44 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+from pcs.cli.common.parse_args import split_list, prepare_options
+from pcs.cli.common.errors import CmdLineInputError
+
+
+class PrepareOptionsTest(TestCase):
+    def test_refuse_option_without_value(self):
+        self.assertRaises(
+            CmdLineInputError, lambda: prepare_options(['abc'])
+        )
+
+    def test_prepare_option_dict_form_args(self):
+        self.assertEqual({'a': 'b', 'c': 'd'}, prepare_options(['a=b', 'c=d']))
+
+    def test_prepare_option_dict_with_empty_value(self):
+        self.assertEqual({'a': ''}, prepare_options(['a=']))
+
+    def test_refuse_option_without_key(self):
+        self.assertRaises(
+            CmdLineInputError, lambda: prepare_options(['=a'])
+        )
+
+class SplitListTest(TestCase):
+    def test_returns_list_with_original_when_separator_not_in_original(self):
+        self.assertEqual([['a', 'b']], split_list(['a', 'b'], 'c'))
+
+    def test_returns_splited_list(self):
+        self.assertEqual(
+            [['a', 'b'], ['c', 'd']],
+            split_list(['a', 'b', '|', 'c', 'd'], '|')
+        )
+
+    def test_behave_like_string_split_when_the_separator_edges(self):
+        self.assertEqual(
+            [[], ['a', 'b'], ['c', 'd'], []],
+            split_list(['|','a', 'b', '|', 'c', 'd', "|"], '|')
+        )
diff --git a/pcs/cli/constraint/__init__.py b/pcs/cli/constraint/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/constraint/command.py b/pcs/cli/constraint/command.py
new file mode 100644
index 0000000..851b13b
--- /dev/null
+++ b/pcs/cli/constraint/command.py
@@ -0,0 +1,70 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.cli.constraint import parse_args, console_report
+from pcs.cli.common.console_report import indent
+
+def create_with_set(create_with_set_library_call, argv, modificators):
+    """
+    callable create_with_set_library_call creates a constraint with a resource set
+    list argv part of comandline args
+        see usage for  "constraint (colocation|resource|ticket) set"
+    dict like object modificators can contain
+        "force" allows resource in clone/master and constraint duplicity
+        "autocorrect" allows correct resource to its clone/master parent
+    """
+    resource_set_list, constraint_options = parse_args.prepare_set_args(argv)
+    create_with_set_library_call(
+        resource_set_list, constraint_options,
+        can_repair_to_clone=modificators["autocorrect"],
+        resource_in_clone_alowed=modificators["force"],
+        duplication_alowed=modificators["force"],
+    )
+
+def show_constraints_with_set(constraint_list, show_detail, indent_step=2):
+    """
+    return list of console lines with info about constraints
+    list of dict constraint_list see constraint in pcs/lib/exchange_formats.md
+    bool show_detail whether to show ids along with options
+    int indent_step is count of spaces for indenting
+    """
+    return ["Resource Sets:"] + indent(
+        [
+            console_report.constraint_with_sets(constraint, with_id=show_detail)
+            for constraint in constraint_list
+        ],
+        indent_step=indent_step
+    )
+
+def show(caption, load_constraints, format_options, modificators):
+    """
+    load constraints and return console lines list with info about constraints
+    string caption for example "Ticket Constraints:"
+    callable load_constraints which returns desired constraints as dictionary
+        like {"plain": [], "with_resource_sets": []}
+    callable format_options takes dict of options and show_detail flag (bool)
+        and returns a string with the constraint formatted for the commandline
+    modificators dict like object with command modificators
+    """
+    show_detail = modificators["full"]
+    constraints = load_constraints()
+
+    line_list = [caption]
+    line_list.extend([
+        "  " + format_options(constraint_options_dict, show_detail)
+        for constraint_options_dict in constraints["plain"]
+    ])
+
+    if constraints["with_resource_sets"]:
+        line_list.extend(
+            indent(show_constraints_with_set(
+                constraints["with_resource_sets"],
+                show_detail
+            ))
+        )
+
+    return line_list
diff --git a/pcs/cli/constraint/console_report.py b/pcs/cli/constraint/console_report.py
new file mode 100644
index 0000000..c9c84ef
--- /dev/null
+++ b/pcs/cli/constraint/console_report.py
@@ -0,0 +1,58 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+
+def constraint_plain(constraint_type, constraint_info, with_id=False):
+    return constraint_type + " ".join(
+        prepare_options(constraint_info["options"], with_id)
+    )
+
+def resource_sets(set_list, with_id=True):
+    """
+    list of dict set_list see resource set
+        in pcs/lib/exchange_formats.md
+    """
+    report = []
+    for resource_set in set_list:
+        report.extend(
+            ["set"] + resource_set["ids"] + options(resource_set["options"])
+        )
+        if with_id:
+            report.append(id_from_options(resource_set["options"]))
+
+    return report
+
+def options(options_dict):
+    return [
+        key+"="+value
+        for key, value in sorted(options_dict.items())
+        if key != "id"
+    ]
+
+def id_from_options(options_dict):
+    return "(id:"+options_dict.get("id", "")+")"
+
+def constraint_with_sets(constraint_info, with_id=True):
+    """
+    dict constraint_info  see constraint in pcs/lib/exchange_formats.md
+    bool with_id have to show id with options_dict
+    """
+    options_dict = options(constraint_info["options"])
+    return " ".join(
+        resource_sets(constraint_info["resource_sets"], with_id)
+        +
+        (["setoptions"] + options_dict if options_dict else [])
+        +
+        ([id_from_options(constraint_info["options"])] if with_id else [])
+    )
+
+def prepare_options(options_dict, with_id=True):
+    return (
+        options(options_dict)
+        +
+        ([id_from_options(options_dict)] if with_id else [])
+    )
diff --git a/pcs/cli/constraint/parse_args.py b/pcs/cli/constraint/parse_args.py
new file mode 100644
index 0000000..41ec182
--- /dev/null
+++ b/pcs/cli/constraint/parse_args.py
@@ -0,0 +1,48 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.cli.common import parse_args
+from pcs.cli.common.errors import CmdLineInputError
+
+
def prepare_resource_sets(cmdline_args):
    """
    Turn commandline arguments into a list of resource set dicts.

    Arguments are split on the "set" keyword; within each group, args
    containing "=" become options, all others are resource ids.
    """
    resource_set_list = []
    for set_args in parse_args.split_list(cmdline_args, "set"):
        ids = [arg for arg in set_args if "=" not in arg]
        option_args = [arg for arg in set_args if "=" in arg]
        resource_set_list.append({
            "ids": ids,
            "options": parse_args.prepare_options(option_args),
        })
    return resource_set_list
+
def prepare_set_args(argv):
    """
    Split argv into (resource_set_list, constraint_options).

    Raise CmdLineInputError when "setoptions" appears more than once, when
    no resource set is given, or when any set contains no resource id.
    """
    if argv.count("setoptions") > 1:
        raise CmdLineInputError(
            "Keyword 'setoptions' may be mentioned at most once"
        )

    if "setoptions" in argv:
        resource_set_args, constraint_options_args = parse_args.split_list(
            argv, "setoptions"
        )
    else:
        resource_set_args, constraint_options_args = argv, []

    if not resource_set_args:
        raise CmdLineInputError()

    resource_set_list = prepare_resource_sets(resource_set_args)
    if not resource_set_list:
        raise CmdLineInputError()
    for resource_set in resource_set_list:
        # every set must name at least one resource
        if not resource_set["ids"]:
            raise CmdLineInputError()

    constraint_options = {}
    if constraint_options_args:
        constraint_options = parse_args.prepare_options(constraint_options_args)

    return (resource_set_list, constraint_options)
diff --git a/pcs/cli/constraint/test/__init__.py b/pcs/cli/constraint/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/constraint/test/test_command.py b/pcs/cli/constraint/test/test_command.py
new file mode 100644
index 0000000..5b493cd
--- /dev/null
+++ b/pcs/cli/constraint/test/test_command.py
@@ -0,0 +1,81 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+from pcs.cli.constraint import command
+
+from pcs.test.tools.pcs_mock import mock
+
def fixture_constraint():
    """Library representation of a constraint with two resource sets."""
    first_set = {"ids": ["a", "b"], "options": {"c": "d", "e": "f"}}
    second_set = {"ids": ["g", "h"], "options": {"i": "j", "k": "l"}}
    return {
        "resource_sets": [first_set, second_set],
        "options": {"m": "n", "o": "p"},
    }
+
def fixture_constraint_console():
    """Expected detailed console output for fixture_constraint()."""
    return "  " + " ".join([
        "set a b c=d e=f (id:)",
        "set g h i=j k=l (id:)",
        "setoptions m=n o=p (id:)",
    ])
+
+
class ShowConstraintsWithSetTest(TestCase):
    """Tests for command.show_constraints_with_set."""

    @staticmethod
    def show(show_detail):
        return command.show_constraints_with_set(
            [fixture_constraint()],
            show_detail=show_detail
        )

    def test_return_line_list(self):
        expected = [
            "Resource Sets:",
            "  set a b c=d e=f set g h i=j k=l setoptions m=n o=p",
        ]
        self.assertEqual(expected, self.show(show_detail=False))

    def test_return_line_list_with_id(self):
        expected = [
            "Resource Sets:",
            fixture_constraint_console(),
        ]
        self.assertEqual(expected, self.show(show_detail=True))
+
class ShowTest(TestCase):
    """Tests for command.show (generic constraint listing)."""
    def test_show_only_caption_when_no_constraint_loaded(self):
        # with no constraints of either kind, only the caption line appears
        self.assertEqual(["caption"], command.show(
            "caption",
            load_constraints=lambda: {"plain": [], "with_resource_sets": []},
            format_options=lambda: None,
            modificators={"full": False}
        ))

    def test_show_constraints_full(self):
        # plain constraints are formatted via format_options, set constraints
        # get their own "Resource Sets:" section
        load_constraints = mock.Mock()
        load_constraints.return_value = {
            "plain": [{"options": {"id": "plain_id"}}],
            "with_resource_sets": [fixture_constraint()]
        }
        format_options = mock.Mock()
        format_options.return_value = "plain constraint listing"
        self.assertEqual(
            [
                "caption",
                "  plain constraint listing",
                "  Resource Sets:",
                "  "+fixture_constraint_console(),
            ],
            command.show(
                "caption",
                load_constraints,
                format_options,
                {"full": True}
            )
        )
diff --git a/pcs/cli/constraint/test/test_console_report.py b/pcs/cli/constraint/test/test_console_report.py
new file mode 100644
index 0000000..b20bc80
--- /dev/null
+++ b/pcs/cli/constraint/test/test_console_report.py
@@ -0,0 +1,75 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+from pcs.cli.constraint import console_report
+
class OptionsTest(TestCase):
    """Tests for console_report.options."""
    def test_get_console_options_from_lib_options(self):
        lib_options = {"c": "d", "a": "b", "id": "some_id"}
        # sorted by name; "id" is never listed as a plain option
        self.assertEqual(["a=b", "c=d"], console_report.options(lib_options))
+
class IdFromOptionsTest(TestCase):
    """Tests for console_report.id_from_options."""
    def test_get_id_from_options(self):
        lib_options = {"c": "d", "a": "b", "id": "some_id"}
        self.assertEqual(
            "(id:some_id)",
            console_report.id_from_options(lib_options)
        )
+
class PrepareOptionsTest(TestCase):
    """Tests for console_report.prepare_options."""

    @staticmethod
    def fixture_options():
        return {"c": "d", "a": "b", "id": "some_id"}

    def test_prepare_options_with_id(self):
        self.assertEqual(
            ["a=b", "c=d", "(id:some_id)"],
            console_report.prepare_options(self.fixture_options())
        )

    def test_prepare_options_without_id(self):
        self.assertEqual(
            ["a=b", "c=d"],
            console_report.prepare_options(
                self.fixture_options(),
                with_id=False
            )
        )
+
class ResourceSetsTest(TestCase):
    """Tests for console_report.resource_sets."""

    @staticmethod
    def fixture_sets():
        return [
            {
                "ids": ["a", "b"],
                "options": {"c": "d", "e": "f", "id": "some_id"},
            },
            {
                "ids": ["g", "h"],
                "options": {"i": "j", "k": "l", "id": "some_id_2"},
            },
        ]

    def test_prepare_resource_sets_without_id(self):
        self.assertEqual(
            ["set", "a", "b", "c=d", "e=f", "set", "g", "h", "i=j", "k=l"],
            console_report.resource_sets(self.fixture_sets(), with_id=False)
        )

    def test_prepare_resource_sets_with_id(self):
        self.assertEqual(
            [
                "set", "a", "b", "c=d", "e=f", "(id:some_id)",
                "set", "g", "h", "i=j", "k=l", "(id:some_id_2)",
            ],
            console_report.resource_sets(self.fixture_sets())
        )
diff --git a/pcs/cli/constraint/test/test_parse_args.py b/pcs/cli/constraint/test/test_parse_args.py
new file mode 100644
index 0000000..7673023
--- /dev/null
+++ b/pcs/cli/constraint/test/test_parse_args.py
@@ -0,0 +1,82 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.cli.constraint.parse_args import prepare_set_args, prepare_resource_sets
+
+
+try:
+    import unittest.mock as mock
+except ImportError:
+    import mock
+
+
@mock.patch("pcs.cli.common.parse_args.prepare_options")
class PrepareResourceSetsTest(TestCase):
    """Tests for prepare_resource_sets; option parsing itself is mocked."""
    def test_prepare_resource_sets(self, options):
        # prepare_options is invoked once per "set" group, in order
        opts = [{"id": "1"}, {"id": "2", "sequential": "true"}]
        options.side_effect = opts
        self.assertEqual(
            [
                {"ids": ["resA", "resB"], "options":opts[0]},
                {"ids": ["resC"], "options": opts[1]},
            ],
            prepare_resource_sets([
                "resA", "resB", "id=resource-set-1",
                "set",
                "resC", "id=resource-set-2", "sequential=true",
            ])
        )

    def test_has_no_responsibility_to_assess_the_content(self, options):
        # empty input still yields one (empty) set; validation happens later
        options.return_value = {}
        self.assertEqual([{"ids":[], "options":{}}], prepare_resource_sets([]))
+
@mock.patch("pcs.cli.common.parse_args.prepare_options")
@mock.patch("pcs.cli.constraint.parse_args.prepare_resource_sets")
class PrepareSetArgvTest(TestCase):
    """Tests for prepare_set_args.

    Note: stacked patch decorators inject mocks bottom-up, hence the
    (res_sets, options) parameter order.
    """
    def test_return_tuple_of_given_resource_set_list_and_options(
        self, res_sets, options
    ):
        res_sets.return_value = [{"ids": "A"}]
        options.return_value = 'O'

        self.assertEqual(
            ([{"ids": "A"}], "O"),
            prepare_set_args(['A', 'b=c', "setoptions", "d=e"])
        )

    def test_right_distribute_full_args(self, res_sets, options):
        # args split at the "setoptions" keyword
        prepare_set_args(['A', 'b=c', "setoptions", "d=e"])
        res_sets.assert_called_once_with(['A', 'b=c'])
        options.assert_called_once_with(["d=e"])

    def test_right_distribute_args_without_options(self, res_sets, options):
        prepare_set_args(['A', 'b=c'])
        res_sets.assert_called_once_with(['A', 'b=c'])
        options.assert_not_called()

    def test_right_distribute_args_with_empty_options(self, res_sets, options):
        # trailing "setoptions" with nothing after it parses no options
        prepare_set_args(['A', 'b=c', 'setoptions'])
        res_sets.assert_called_once_with(['A', 'b=c'])
        options.assert_not_called()

    def test_raises_when_no_set_specified(self, res_sets, options):
        self.assertRaises(CmdLineInputError, lambda: prepare_set_args([]))
        res_sets.assert_not_called()

    def test_raises_when_no_resource_in_set(self, res_sets, options):
        res_sets.return_value = [{"ids": [], "options": {"b": "c"}}]
        self.assertRaises(CmdLineInputError, lambda: prepare_set_args(["b=c"]))
        res_sets.assert_called_once_with(["b=c"])

    def test_raises_when_setoption_more_than_once(self, res_sets, options):
        self.assertRaises(CmdLineInputError, lambda: prepare_set_args(
            ['A', 'b=c', 'setoptions', "c=d", "setoptions", "e=f"]
        ))
diff --git a/pcs/cli/constraint_all/__init__.py b/pcs/cli/constraint_all/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/constraint_all/console_report.py b/pcs/cli/constraint_all/console_report.py
new file mode 100644
index 0000000..b216010
--- /dev/null
+++ b/pcs/cli/constraint_all/console_report.py
@@ -0,0 +1,55 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.cli.constraint.console_report import (
+    constraint_plain as constraint_plain_default,
+    constraint_with_sets,
+)
+from pcs.cli.constraint_colocation.console_report import (
+    constraint_plain as colocation_plain
+)
+from pcs.cli.constraint_order.console_report import (
+    constraint_plain as order_plain
+)
+from pcs.cli.constraint_ticket.console_report import (
+    constraint_plain as ticket_plain
+)
+
+
def constraint(constraint_type, constraint_info, with_id=True):
    """
    Return console representation of any constraint.

    dict constraint_info -- see constraint in pcs/lib/exchange_formats.md
    bool with_id -- show the constraint id as well
    """
    # constraints carrying resource sets have their own formatter
    if "resource_sets" not in constraint_info:
        return constraint_plain(constraint_type, constraint_info, with_id)
    return constraint_with_sets(constraint_info, with_id)
+
def constraint_plain(constraint_type, options_dict, with_id=False):
    """return console shape for any constraint_type of plain constraint"""
    specialized_reporter = {
        "rsc_colocation": colocation_plain,
        "rsc_order": order_plain,
        "rsc_ticket": ticket_plain,
    }.get(constraint_type)

    # unknown constraint types fall back to the generic formatter
    if specialized_reporter is None:
        return constraint_plain_default(constraint_type, options_dict, with_id)

    return specialized_reporter(options_dict, with_id)
+
def duplicate_constraints_report(report_item):
    """
    Build the console message for a duplicate-constraint report item.

    The literal "{force}" is kept in the output; presumably it is filled in
    later by the report processor -- TODO confirm against the caller.
    """
    constraint_lines = [
        "  " + constraint(report_item.info["constraint_type"], constraint_info)
        for constraint_info in report_item.info["constraint_info_list"]
    ]
    return (
        "duplicate constraint already exists{force}\n"
        + "\n".join(constraint_lines)
    )
diff --git a/pcs/cli/constraint_all/test/__init__.py b/pcs/cli/constraint_all/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/constraint_all/test/test_console_report.py b/pcs/cli/constraint_all/test/test_console_report.py
new file mode 100644
index 0000000..1cf5721
--- /dev/null
+++ b/pcs/cli/constraint_all/test/test_console_report.py
@@ -0,0 +1,79 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+from pcs.test.tools.pcs_mock import mock
+from pcs.cli.constraint_all import console_report
+
class ConstraintTest(TestCase):
    """Tests for console_report.constraint dispatching."""
    @mock.patch("pcs.cli.constraint_all.console_report.constraint_plain")
    def test_can_display_plain_constraint(self, mock_constraint_plain):
        # no "resource_sets" key => plain formatter is chosen
        mock_constraint_plain.return_value = "plain"
        self.assertEqual(
            'plain',
            console_report.constraint(
                "rsc_ticket",
                "constraint_in_library_representation"
            )
        )
        # with_id defaults to True and is passed through positionally
        mock_constraint_plain.assert_called_once_with(
            "rsc_ticket",
            "constraint_in_library_representation",
            True
        )

    @mock.patch("pcs.cli.constraint_all.console_report.constraint_with_sets")
    def test_can_display_constraint_with_set(self, mock_constraint_with_sets):
        # a "resource_sets" key routes to the resource-set formatter
        mock_constraint_with_sets.return_value = "with_set"
        self.assertEqual(
            'with_set',
            console_report.constraint(
                "rsc_ticket",
                {"resource_sets": "some_resource_sets", "options": {"a": "b"}},
                with_id=False
            )
        )
        mock_constraint_with_sets.assert_called_once_with(
            {"resource_sets": "some_resource_sets", "options": {"a": "b"}},
            False
        )
+
class ConstraintPlainTest(TestCase):
    """Tests for console_report.constraint_plain type dispatch."""
    @mock.patch("pcs.cli.constraint_all.console_report.colocation_plain")
    def test_choose_right_reporter(self, mock_colocation_plain):
        # "rsc_colocation" must be routed to the colocation formatter
        mock_colocation_plain.return_value = "some constraint formated"
        self.assertEqual(
            "some constraint formated",
            console_report.constraint_plain(
                "rsc_colocation",
                "constraint_in_library_representation",
                with_id=True
            )
        )
        mock_colocation_plain.assert_called_once_with(
            "constraint_in_library_representation",
            True
        )
+
class DuplicateConstraintsReportTest(TestCase):
    """Tests for console_report.duplicate_constraints_report."""
    @mock.patch("pcs.cli.constraint_all.console_report.constraint")
    def test_translate_from_report_item(self, mock_constraint):
        report_item = mock.MagicMock()
        report_item.info = {
            "constraint_info_list": [{"options": {"a": "b"}}],
            "constraint_type": "rsc_some"
        }
        mock_constraint.return_value = "constraint info"

        # the "{force}" placeholder is expected verbatim in the message
        self.assertEqual(
            "\n".join([
                "duplicate constraint already exists{force}",
                "  constraint info"
            ]),
            console_report.duplicate_constraints_report(report_item)

        )
diff --git a/pcs/cli/constraint_colocation/__init__.py b/pcs/cli/constraint_colocation/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/constraint_colocation/command.py b/pcs/cli/constraint_colocation/command.py
new file mode 100644
index 0000000..b62fc7a
--- /dev/null
+++ b/pcs/cli/constraint_colocation/command.py
@@ -0,0 +1,39 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.cli.constraint import command
+from pcs.cli.constraint_colocation import console_report
+
+
def create_with_set(lib, argv, modificators):
    """
    Create a colocation constraint containing resource sets.

    object lib exposes library
    list argv see usage for "constraint colocation set"
    dict like object modificators can contain
        "force" allows resource in clone/master and constraint duplicity
        "autocorrect" allows correct resource to its clone/master parent
    """
    command.create_with_set(lib.constraint_colocation.set, argv, modificators)
+
def show(lib, argv, modificators):
    """
    Print all colocation constraints.

    object lib exposes library
    list argv see usage for "constraint colocation show"
    dict like object modificators can contain "full"
    """
    line_list = command.show(
        "Colocation Constraints:",
        lib.constraint_colocation.show,
        console_report.constraint_plain,
        modificators,
    )
    print("\n".join(line_list))
diff --git a/pcs/cli/constraint_colocation/console_report.py b/pcs/cli/constraint_colocation/console_report.py
new file mode 100644
index 0000000..0ede276
--- /dev/null
+++ b/pcs/cli/constraint_colocation/console_report.py
@@ -0,0 +1,28 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
def constraint_plain(constraint_info, with_id=False):
    """
    Return console representation of a plain colocation constraint.

    dict constraint_info see constraint in pcs/lib/exchange_formats.md
    bool with_id have to show id with options_dict
    """
    options_dict = constraint_info["options"]
    handled_keys = ("rsc", "with-rsc", "id", "score")
    token_list = [
        options_dict.get("rsc", ""),
        "with",
        options_dict.get("with-rsc", ""),
        # the score token is shown even when the score is empty
        "(score:{0})".format(options_dict.get("score", "")),
    ]
    for name, value in sorted(options_dict.items()):
        if name not in handled_keys:
            token_list.append("({0}:{1})".format(name, value))
    if with_id:
        token_list.append("(id:{0})".format(options_dict.get("id", "")))
    return " ".join(token_list)
diff --git a/pcs/cli/constraint_order/__init__.py b/pcs/cli/constraint_order/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/constraint_order/command.py b/pcs/cli/constraint_order/command.py
new file mode 100644
index 0000000..8b77dbd
--- /dev/null
+++ b/pcs/cli/constraint_order/command.py
@@ -0,0 +1,39 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.cli.constraint import command
+from pcs.cli.constraint_order import console_report
+
+
def create_with_set(lib, argv, modificators):
    """
    Create an order constraint containing resource sets.

    object lib exposes library
    list argv see usage for "constraint order set"
    dict like object modificators can contain
        "force" allows resource in clone/master and constraint duplicity
        "autocorrect" allows correct resource to its clone/master parent
    """
    command.create_with_set(lib.constraint_order.set, argv, modificators)
+
def show(lib, argv, modificators):
    """
    Print all order constraints.

    object lib exposes library
    list argv see usage for "constraint order show"
    dict like object modificators can contain "full"
    """
    line_list = command.show(
        "Ordering Constraints:",
        lib.constraint_order.show,
        console_report.constraint_plain,
        modificators,
    )
    print("\n".join(line_list))
diff --git a/pcs/cli/constraint_order/console_report.py b/pcs/cli/constraint_order/console_report.py
new file mode 100644
index 0000000..8a54224
--- /dev/null
+++ b/pcs/cli/constraint_order/console_report.py
@@ -0,0 +1,53 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+from pcs.lib.pacemaker_values import is_true
+
def constraint_plain(constraint_info, with_id=False):
    """
    Return console representation of a plain order constraint.

    dict constraint_info see constraint in pcs/lib/exchange_formats.md
    bool with_id have to show id with options_dict
    """
    options = constraint_info["options"]
    already_processed_options = (
        "first", "then", "first-action", "then-action", "id", "score", "kind",
        "symmetrical"
    )

    # kind wins over score; neither present means the default kind Mandatory
    kind = options.get("kind", "")
    score = options.get("score", "")
    if kind != "":
        score_text = "(kind:" + kind + ")"
    elif score == "":
        score_text = "(kind:Mandatory)"
    else:
        score_text = "(score:" + score + ")"

    symmetrical_text = ""
    if (
        "symmetrical" in options
        and
        not is_true(options.get("symmetrical", "false"))
    ):
        symmetrical_text = "(non-symmetrical)"

    # remaining options keep the dict's own iteration order (not sorted)
    extra_options = " ".join([
        "{0}={1}".format(name, value)
        for name, value in options.items()
        if name not in already_processed_options
    ])
    if extra_options:
        extra_options = "(Options: " + extra_options + ")"

    id_text = "(id:" + options.get("id", "") + ")" if with_id else ""

    token_list = [
        options.get("first-action", ""),
        options.get("first", ""),
        "then",
        options.get("then-action", ""),
        options.get("then", ""),
        score_text,
        symmetrical_text,
        extra_options,
        id_text,
    ]
    # drop empty tokens so no double spaces appear
    return " ".join([token for token in token_list if token])
diff --git a/pcs/cli/constraint_ticket/__init__.py b/pcs/cli/constraint_ticket/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/constraint_ticket/command.py b/pcs/cli/constraint_ticket/command.py
new file mode 100644
index 0000000..ab70434
--- /dev/null
+++ b/pcs/cli/constraint_ticket/command.py
@@ -0,0 +1,67 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.cli.constraint import command
+from pcs.cli.constraint_ticket import parse_args, console_report
+
def create_with_set(lib, argv, modificators):
    """
    Create a ticket constraint containing resource sets.

    object lib exposes library
    list argv see usage for "constraint ticket set"
    dict like object modificators can contain
        "force" allows resource in clone/master and constraint duplicity
        "autocorrect" allows correct resource to its clone/master parent
    """
    command.create_with_set(lib.constraint_ticket.set, argv, modificators)
+
def add(lib, argv, modificators):
    """
    Create a ticket constraint for a single resource.

    object lib exposes library
    list argv see usage for "constraint ticket add"
    dict like object modificators can contain
        "force" allows resource in clone/master and constraint duplicity
        "autocorrect" allows correct resource to its clone/master parent
    """
    ticket, resource_id, resource_role, options = parse_args.parse_add(argv)

    # the role must be given positionally (before the resource id), never
    # as a name=value option
    if "rsc-role" in options:
        raise CmdLineInputError(
            "Resource role must not be specified among options"
            ", specify it before resource id"
        )

    if resource_role:
        options["rsc-role"] = resource_role

    # NOTE(review): the "*_alowed" keyword spellings match the library API
    # and therefore must not be corrected here
    lib.constraint_ticket.add(
        ticket,
        resource_id,
        options,
        autocorrection_allowed=modificators["autocorrect"],
        resource_in_clone_alowed=modificators["force"],
        duplication_alowed=modificators["force"],
    )
+
def show(lib, argv, modificators):
    """
    Print all ticket constraints.

    object lib exposes library
    list argv see usage for "constraint ticket show"
    dict like object modificators can contain "full"
    """
    line_list = command.show(
        "Ticket Constraints:",
        lib.constraint_ticket.show,
        console_report.constraint_plain,
        modificators,
    )
    print("\n".join(line_list))
diff --git a/pcs/cli/constraint_ticket/console_report.py b/pcs/cli/constraint_ticket/console_report.py
new file mode 100644
index 0000000..657b867
--- /dev/null
+++ b/pcs/cli/constraint_ticket/console_report.py
@@ -0,0 +1,26 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+from pcs.cli.constraint.console_report import prepare_options
+
+
def constraint_plain(constraint_info, with_id=False):
    """
    Return console representation of a plain ticket constraint.

    dict constraint_info  see constraint in pcs/lib/exchange_formats.md
    bool with_id have to show id with options_dict
    """
    options = constraint_info["options"]
    remaining_options = dict(
        (name, value) for name, value in options.items()
        if name not in ("rsc-role", "rsc")
    )
    token_list = [options.get("rsc-role", ""), options.get("rsc", "")]
    token_list.extend(prepare_options(remaining_options, with_id))
    return " ".join(token_list)
diff --git a/pcs/cli/constraint_ticket/parse_args.py b/pcs/cli/constraint_ticket/parse_args.py
new file mode 100644
index 0000000..dfd2c1a
--- /dev/null
+++ b/pcs/cli/constraint_ticket/parse_args.py
@@ -0,0 +1,44 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.cli.common import parse_args
+from pcs.cli.common.errors import CmdLineInputError
+
+
def separate_tail_option_candidates(arg_list):
    """
    Split arg_list at the first "name=value" argument.

    Return (head, tail): tail begins with the first arg containing "=" and
    runs to the end; head is everything before it.
    """
    split_at = len(arg_list)
    for index, arg in enumerate(arg_list):
        if "=" in arg:
            split_at = index
            break
    return arg_list[:split_at], arg_list[split_at:]
+
def parse_add(arg_list):
    """
    Parse "constraint ticket add" arguments.

    Return (ticket, resource_id, resource_role, options_dict).
    Raise CmdLineInputError when the ticket is missing or the resource
    specification is not "resource" or "role resource".
    """
    info, option_candidates = separate_tail_option_candidates(arg_list)

    if not info:
        raise CmdLineInputError("Ticket not specified")

    ticket = info[0]
    resource_specification = info[1:]

    if len(resource_specification) == 2:
        resource_role, resource_id = resource_specification
    elif len(resource_specification) == 1:
        resource_role = ""
        resource_id = resource_specification[0]
    else:
        raise CmdLineInputError(
            "invalid resource specification: '{0}'"
            .format(" ".join(resource_specification))
        )

    return (
        ticket,
        resource_id,
        resource_role,
        parse_args.prepare_options(option_candidates)
    )
diff --git a/pcs/cli/constraint_ticket/test/__init__.py b/pcs/cli/constraint_ticket/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/cli/constraint_ticket/test/test_command.py b/pcs/cli/constraint_ticket/test/test_command.py
new file mode 100644
index 0000000..045d336
--- /dev/null
+++ b/pcs/cli/constraint_ticket/test/test_command.py
@@ -0,0 +1,67 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.test.tools.pcs_mock import mock
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.cli.constraint_ticket import command
+
class AddTest(TestCase):
    """Tests for command.add (ticket constraint creation)."""
    @mock.patch("pcs.cli.constraint_ticket.command.parse_args.parse_add")
    def test_call_library_with_correct_attrs(self, mock_parse_add):
        mock_parse_add.return_value = (
            "ticket", "resource_id", "", {"loss-policy": "fence"}
        )
        lib = mock.MagicMock()
        lib.constraint_ticket = mock.MagicMock()
        lib.constraint_ticket.add = mock.MagicMock()

        command.add(lib, ["argv"], {"force": True, "autocorrect": True})

        mock_parse_add.assert_called_once_with(["argv"])
        # keyword spellings ("alowed") follow the library API
        lib.constraint_ticket.add.assert_called_once_with(
            "ticket", "resource_id", {"loss-policy": "fence"},
            autocorrection_allowed=True,
            resource_in_clone_alowed=True,
            duplication_alowed=True,
        )

    @mock.patch("pcs.cli.constraint_ticket.command.parse_args.parse_add")
    def test_refuse_resource_role_in_options(self, mock_parse_add):
        # "rsc-role" given as an option (not positionally) must be rejected
        mock_parse_add.return_value = (
            "ticket", "resource_id", "resource_role", {"rsc-role": "master"}
        )
        lib = None
        self.assertRaises(
            CmdLineInputError,
            lambda: command.add(
                lib,
                ["argv"],
                {"force": True, "autocorrect": True},
            )
        )

    @mock.patch("pcs.cli.constraint_ticket.command.parse_args.parse_add")
    def test_put_resource_role_to_options_for_library(self, mock_parse_add):
        # a positional role is forwarded to the library as "rsc-role"
        mock_parse_add.return_value = (
            "ticket", "resource_id", "resource_role", {"loss-policy": "fence"}
        )
        lib = mock.MagicMock()
        lib.constraint_ticket = mock.MagicMock()
        lib.constraint_ticket.add = mock.MagicMock()

        command.add(lib, ["argv"], {"force": True, "autocorrect": True})

        mock_parse_add.assert_called_once_with(["argv"])
        lib.constraint_ticket.add.assert_called_once_with(
            "ticket", "resource_id",
            {"loss-policy": "fence", "rsc-role": "resource_role"},
            autocorrection_allowed=True,
            resource_in_clone_alowed=True,
            duplication_alowed=True,
        )
diff --git a/pcs/cli/constraint_ticket/test/test_console_report.py b/pcs/cli/constraint_ticket/test/test_console_report.py
new file mode 100644
index 0000000..a8e570f
--- /dev/null
+++ b/pcs/cli/constraint_ticket/test/test_console_report.py
@@ -0,0 +1,23 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+from pcs.cli.constraint_ticket import console_report
+
class ConstraintPlainTest(TestCase):
    """Tests for console_report.constraint_plain (ticket constraints)."""
    def test_prepare_report(self):
        constraint_info = {"options": {
            "rsc-role": "Master",
            "rsc": "resourceA",
            "id": "some_id",
        }}
        self.assertEqual(
            "Master resourceA (id:some_id)",
            console_report.constraint_plain(constraint_info, with_id=True)
        )
diff --git a/pcs/cli/constraint_ticket/test/test_parse_args.py b/pcs/cli/constraint_ticket/test/test_parse_args.py
new file mode 100644
index 0000000..9d23167
--- /dev/null
+++ b/pcs/cli/constraint_ticket/test/test_parse_args.py
@@ -0,0 +1,86 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+from pcs.cli.constraint_ticket import parse_args
+from pcs.cli.common.errors import CmdLineInputError
+
class ParseAddTest(TestCase):
    """Tests for parse_args.parse_add."""

    def assert_raises_cmdline(self, arg_list):
        self.assertRaises(
            CmdLineInputError,
            lambda: parse_args.parse_add(arg_list)
        )

    def test_parse_add_args(self):
        self.assertEqual(
            (
                "T",
                "resource1",
                "",
                {"ticket": "T", "loss-policy": "fence"},
            ),
            parse_args.parse_add(
                ["T", "resource1", "ticket=T", "loss-policy=fence"]
            )
        )

    def test_parse_add_args_with_resource_role(self):
        self.assertEqual(
            (
                "T",
                "resource1",
                "master",
                {"ticket": "T", "loss-policy": "fence"},
            ),
            parse_args.parse_add(
                ["T", "master", "resource1", "ticket=T", "loss-policy=fence"]
            )
        )

    def test_raises_when_invalid_resource_specification(self):
        self.assert_raises_cmdline(
            ["T", "master", "resource1", "something_else"]
        )

    def test_raises_when_ticket_and_resource_not_specified(self):
        self.assert_raises_cmdline(["loss-policy=fence"])

    def test_raises_when_resource_not_specified(self):
        self.assert_raises_cmdline(["T", "loss-policy=fence"])
+
class SeparateTailOptionCandidatesTest(TestCase):
    """Tests for parse_args.separate_tail_option_candidates."""

    def assert_split(self, expected, arg_list):
        self.assertEqual(
            expected,
            parse_args.separate_tail_option_candidates(arg_list)
        )

    def test_separate_when_both_parts_there(self):
        self.assert_split(
            (["a", "b"], ["c=d", "e=f"]),
            ["a", "b", "c=d", "e=f"]
        )

    def test_returns_empty_head_when_options_there_only(self):
        self.assert_split(([], ["c=d", "e=f"]), ["c=d", "e=f"])

    def test_returns_empty_tail_when_no_options_there(self):
        self.assert_split((["a", "b"], []), ["a", "b"])
diff --git a/pcs/cluster.py b/pcs/cluster.py
index 826f8d6..872b922 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -1,7 +1,9 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import os
 import subprocess
@@ -11,8 +13,8 @@ import socket
 import tempfile
 import datetime
 import json
+import time
 import xml.dom.minidom
-import threading
 try:
     # python2
     from commands import getstatusoutput
@@ -20,20 +22,26 @@ except ImportError:
     # python3
     from subprocess import getstatusoutput
 
-import settings
-import usage
-import utils
-import corosync_conf as corosync_conf_utils
-import pcsd
-import status
-import prop
-import resource
-import stonith
-import constraint
-from errors import ReportItem
-from errors import error_codes
-
-pcs_dir = os.path.dirname(os.path.realpath(__file__))
+from pcs import (
+    constraint,
+    node,
+    pcsd,
+    prop,
+    resource,
+    settings,
+    status,
+    stonith,
+    usage,
+    utils,
+)
+from pcs.utils import parallel_for_nodes
+from pcs.common import report_codes
+from pcs.lib import (
+    pacemaker as lib_pacemaker,
+    reports as lib_reports,
+)
+from pcs.lib.errors import ReportItemSeverity, LibraryError
+from pcs.lib.corosync import config_parser as corosync_conf_utils
 
 def cluster_cmd(argv):
     if len(argv) == 0:
@@ -55,7 +63,7 @@ def cluster_cmd(argv):
     elif (sub_cmd == "status"):
         status.cluster_status(argv)
     elif (sub_cmd == "pcsd-status"):
-        cluster_gui_status(argv)
+        status.cluster_pcsd_status(argv)
     elif (sub_cmd == "certkey"):
         cluster_certkey(argv)
     elif (sub_cmd == "auth"):
@@ -77,9 +85,9 @@ def cluster_cmd(argv):
     elif (sub_cmd == "kill"):
         kill_cluster(argv)
     elif (sub_cmd == "standby"):
-        node_standby(argv)
+        node.node_standby(argv)
     elif (sub_cmd == "unstandby"):
-        node_standby(argv, False)
+        node.node_standby(argv, False)
     elif (sub_cmd == "enable"):
         if "--all" in utils.pcs_options:
             enable_cluster_all()
@@ -97,7 +105,7 @@ def cluster_cmd(argv):
     elif (sub_cmd == "cib-push"):
         cluster_push(argv)
     elif (sub_cmd == "cib-upgrade"):
-        cluster_upgrade()
+        utils.cluster_upgrade()
     elif (sub_cmd == "edit"):
         cluster_edit(argv)
     elif (sub_cmd == "node"):
@@ -190,7 +198,7 @@ def auth_nodes(nodes):
         if password == None:
             password = utils.get_terminal_password()
 
-        auth_nodes_do(
+        utils.auth_nodes_do(
             set_nodes, username, password, '--force' in utils.pcs_options,
             '--local' in utils.pcs_options
         )
@@ -198,109 +206,9 @@ def auth_nodes(nodes):
         for node in set_nodes:
             print(node + ": Already authorized")
 
-def auth_nodes_do(nodes, username, password, force, local):
-    pcsd_data = {
-        'nodes': list(set(nodes)),
-        'username': username,
-        'password': password,
-        'force': force,
-        'local': local,
-    }
-    output, retval = utils.run_pcsdcli('auth', pcsd_data)
-    if retval == 0 and output['status'] == 'access_denied':
-        utils.err('Access denied')
-    if retval == 0 and output['status'] == 'ok' and output['data']:
-        failed = False
-        try:
-            if not output['data']['sync_successful']:
-                utils.err(
-                    "Some nodes had a newer tokens than the local node. "
-                    + "Local node's tokens were updated. "
-                    + "Please repeat the authentication if needed."
-                )
-            for node, result in output['data']['auth_responses'].items():
-                if result['status'] == 'ok':
-                    print("{0}: Authorized".format(node))
-                elif result['status'] == 'already_authorized':
-                    print("{0}: Already authorized".format(node))
-                elif result['status'] == 'bad_password':
-                    utils.err(
-                        "{0}: Username and/or password is incorrect".format(node),
-                        False
-                    )
-                    failed = True
-                elif result['status'] == 'noresponse':
-                    utils.err("Unable to communicate with {0}".format(node), False)
-                    failed = True
-                else:
-                    utils.err("Unexpected response from {0}".format(node), False)
-                    failed = True
-            if output['data']['sync_nodes_err']:
-                utils.err(
-                    (
-                        "Unable to synchronize and save tokens on nodes: {0}. "
-                        + "Are they authorized?"
-                    ).format(
-                        ", ".join(output['data']['sync_nodes_err'])
-                    ),
-                    False
-                )
-                failed = True
-        except:
-            utils.err('Unable to communicate with pcsd')
-        if failed:
-            sys.exit(1)
-        return
-    utils.err('Unable to communicate with pcsd')
-
-# If no arguments get current cluster node status, otherwise get listed
-# nodes status
-def cluster_gui_status(argv,dont_exit = False):
-    bad_nodes = False
-    if len(argv) == 0:
-        nodes = utils.getNodesFromCorosyncConf()
-        if len(nodes) == 0:
-            if utils.is_rhel6():
-                utils.err("no nodes found in cluster.conf")
-            else:
-                utils.err("no nodes found in corosync.conf")
-        bad_nodes = check_nodes(nodes, "  ")
-    else:
-        bad_nodes = check_nodes(argv, "  ")
-    if bad_nodes and not dont_exit:
-        sys.exit(2)
-
 def cluster_certkey(argv):
     return pcsd.pcsd_certkey(argv)
 
-# Check and see if pcsd is running on the nodes listed
-def check_nodes(nodes, prefix = ""):
-    bad_nodes = False
-    if not utils.is_rhel6():
-        pm_nodes = utils.getPacemakerNodesID(True)
-        cs_nodes = utils.getCorosyncNodesID(True)
-    for node in nodes:
-        status = utils.checkAuthorization(node)
-
-        if not utils.is_rhel6():
-            if node not in pm_nodes.values():
-                for n_id, n in cs_nodes.items():
-                    if node == n and n_id in pm_nodes:
-                        real_node_name = pm_nodes[n_id]
-                        if real_node_name == "(null)":
-                            real_node_name = "*Unknown*"
-                        node = real_node_name +  " (" + node + ")"
-                        break
-
-        if status[0] == 0:
-            print(prefix + node + ": Online")
-        elif status[0] == 3:
-            print(prefix + node + ": Unable to authenticate")
-            bad_nodes = True
-        else:
-            print(prefix + node + ": Offline")
-            bad_nodes = True
-    return bad_nodes
 
 def cluster_setup(argv):
     if len(argv) < 2:
@@ -309,6 +217,11 @@ def cluster_setup(argv):
 
     is_rhel6 = utils.is_rhel6()
     cluster_name = argv[0]
+    wait = False
+    wait_timeout = None
+    if "--start" in utils.pcs_options and "--wait" in utils.pcs_options:
+        wait_timeout = utils.validate_wait_get_timeout(False)
+        wait = True
 
     # get nodes' addresses
     udpu_rrp = False
@@ -353,10 +266,14 @@ def cluster_setup(argv):
 
     # parse, validate and complete options
     if is_rhel6:
-        options, messages = cluster_setup_parse_options_cman(utils.pcs_options)
+        options, messages = cluster_setup_parse_options_cman(
+            utils.pcs_options,
+            "--force" in utils.pcs_options
+        )
     else:
         options, messages = cluster_setup_parse_options_corosync(
-            utils.pcs_options
+            utils.pcs_options,
+            "--force" in utils.pcs_options
         )
     if udpu_rrp and "rrp_mode" not in options["transport_options"]:
         options["transport_options"]["rrp_mode"] = "passive"
@@ -414,6 +331,8 @@ def cluster_setup(argv):
             start_cluster([])
         if "--enable" in utils.pcs_options:
             enable_cluster([])
+        if wait:
+            wait_for_nodes_started([], wait_timeout)
 
     # setup on remote nodes
     else:
@@ -482,8 +401,11 @@ def cluster_setup(argv):
         # sync certificates as the last step because it restarts pcsd
         print()
         pcsd.pcsd_sync_certs([], exit_after_error=False)
+        if wait:
+            print()
+            wait_for_nodes_started(primary_addr_list, wait_timeout)
 
-def cluster_setup_parse_options_corosync(options):
+def cluster_setup_parse_options_corosync(options, force=False):
     messages = []
     parsed = {
         "transport_options": {
@@ -492,42 +414,41 @@ def cluster_setup_parse_options_corosync(options):
         "totem_options": {},
         "quorum_options": {},
     }
+    severity = ReportItemSeverity.WARNING if force else ReportItemSeverity.ERROR
+    forceable = None if force else report_codes.FORCE_OPTIONS
 
     transport = "udpu"
     if "--transport" in options:
         transport = options["--transport"]
-        if transport not in ("udp", "udpu"):
-            messages.append(ReportItem.error(
-                error_codes.UNKNOWN_TRANSPORT,
-                "unknown transport '{transport}'",
-                info={'transport': transport},
-                forceable=True,
+        allowed_transport = ("udp", "udpu")
+        if transport not in allowed_transport:
+            messages.append(lib_reports.invalid_option_value(
+                "transport",
+                transport,
+                allowed_transport,
+                severity,
+                forceable
             ))
     parsed["transport_options"]["transport"] = transport
 
     if transport == "udpu" and ("--addr0" in options or "--addr1" in options):
-        messages.append(ReportItem.error(
-            error_codes.NON_UDP_TRANSPORT_ADDR_MISMATCH,
-            '--addr0 and --addr1 can only be used with --transport=udp',
-        ))
+        messages.append(lib_reports.rrp_addresses_transport_mismatch())
     rrpmode = None
     if "--rrpmode" in options or "--addr0" in options:
         rrpmode = "passive"
         if "--rrpmode" in options:
             rrpmode = options["--rrpmode"]
-        if rrpmode not in ("passive", "active"):
-            messages.append(ReportItem.error(
-                error_codes.UNKNOWN_RRP_MODE,
-                '{rrpmode} is an unknown RRP mode',
-                info={'rrpmode': rrpmode},
-                forceable=True,
+        allowed_rrpmode = ("passive", "active")
+        if rrpmode not in allowed_rrpmode:
+            messages.append(lib_reports.invalid_option_value(
+                "RRP mode",
+                rrpmode,
+                allowed_rrpmode,
+                severity,
+                forceable
             ))
         if rrpmode == "active":
-            messages.append(ReportItem.error(
-                error_codes.RRP_ACTIVE_NOT_SUPPORTED,
-                "using a RRP mode of 'active' is not supported or tested",
-                forceable=True,
-            ))
+            messages.append(lib_reports.rrp_active_not_supported(force))
     if rrpmode:
         parsed["transport_options"]["rrp_mode"] = rrpmode
 
@@ -584,24 +505,15 @@ def cluster_setup_parse_options_corosync(options):
     for opt_name in (
         "--wait_for_all", "--auto_tie_breaker", "--last_man_standing"
     ):
-        allowed_values = ('0', '1')
+        allowed_values = ("0", "1")
         if opt_name in options and options[opt_name] not in allowed_values:
-            messages.append(ReportItem.error(
-                error_codes.INVALID_OPTION_VALUE,
-                "'{option_value}' is not a valid value for {option_name}, "
-                    +"use {allowed_values}"
-                ,
-                info={
-                    'option_name': opt_name,
-                    'option_value': options[opt_name],
-                    'allowed_values_raw': allowed_values,
-                    'allowed_values': ' or '.join(allowed_values),
-                },
+            messages.append(lib_reports.invalid_option_value(
+                opt_name, options[opt_name], allowed_values
             ))
 
     return parsed, messages
 
-def cluster_setup_parse_options_cman(options):
+def cluster_setup_parse_options_cman(options, force=False):
     messages = []
     parsed = {
         "transport_options": {
@@ -609,6 +521,8 @@ def cluster_setup_parse_options_cman(options):
         },
         "totem_options": {},
     }
+    severity = ReportItemSeverity.WARNING if force else ReportItemSeverity.ERROR
+    forceable = None if force else report_codes.FORCE_OPTIONS
 
     broadcast = ("--broadcast0" in options) or ("--broadcast1" in options)
     if broadcast:
@@ -620,57 +534,43 @@ def cluster_setup_parse_options_cman(options):
         if "--broadcast1" not in options:
             ring_missing_broadcast = "1"
         if ring_missing_broadcast:
-            messages.append(ReportItem.warning(
-                error_codes.CMAN_BROADCAST_ALL_RINGS,
-                'Enabling broadcast for ring {ring_missing_broadcast}'
-                    +' as CMAN does not support broadcast in only one ring'
-                ,
-                info={'ring_missing_broadcast': ring_missing_broadcast}
-            ))
+            messages.append(lib_reports.cman_broadcast_all_rings())
     else:
         transport = "udp"
         if "--transport" in options:
             transport = options["--transport"]
-            if transport not in ("udp", "udpu"):
-                messages.append(ReportItem.error(
-                    error_codes.UNKNOWN_TRANSPORT,
-                    "unknown transport '{transport}'",
-                    info={'transport': transport},
-                    forceable=True,
+            allowed_transport = ("udp", "udpu")
+            if transport not in allowed_transport:
+                messages.append(lib_reports.invalid_option_value(
+                    "transport",
+                    transport,
+                    allowed_transport,
+                    severity,
+                    forceable
                 ))
     parsed["transport_options"]["transport"] = transport
 
     if transport == "udpu":
-        messages.append(ReportItem.warning(
-            error_codes.CMAN_UDPU_RESTART_REQUIRED,
-            "Using udpu transport on a CMAN cluster, "
-                + "cluster restart is required after node add or remove"
-            ,
-        ))
+        messages.append(lib_reports.cman_udpu_restart_required())
     if transport == "udpu" and ("--addr0" in options or "--addr1" in options):
-        messages.append(ReportItem.error(
-            error_codes.NON_UDP_TRANSPORT_ADDR_MISMATCH,
-            '--addr0 and --addr1 can only be used with --transport=udp',
-        ))
+        messages.append(lib_reports.rrp_addresses_transport_mismatch())
 
     rrpmode = None
     if "--rrpmode" in options or "--addr0" in options:
         rrpmode = "passive"
         if "--rrpmode" in options:
             rrpmode = options["--rrpmode"]
-        if rrpmode not in ("passive", "active"):
-            messages.append(ReportItem.error(
-                error_codes.UNKNOWN_RRP_MODE,
-                '{rrpmode} is an unknown RRP mode',
-                info={'rrpmode': rrpmode},
-                forceable=True,
+        allowed_rrpmode = ("passive", "active")
+        if rrpmode not in allowed_rrpmode:
+            messages.append(lib_reports.invalid_option_value(
+                "RRP mode",
+                rrpmode,
+                allowed_rrpmode,
+                severity,
+                forceable
             ))
         if rrpmode == "active":
-            messages.append(ReportItem.error(
-                error_codes.RRP_ACTIVE_NOT_SUPPORTED,
-                "using a RRP mode of 'active' is not supported or tested",
-                forceable=True,
-            ))
+            messages.append(lib_reports.rrp_active_not_supported(force))
     if rrpmode:
         parsed["transport_options"]["rrp_mode"] = rrpmode
 
@@ -713,11 +613,7 @@ def cluster_setup_parse_options_cman(options):
     )
     for opt_name in ignored_options_names:
         if opt_name in options:
-            messages.append(ReportItem.warning(
-                error_codes.IGNORED_CMAN_UNSUPPORTED_OPTION,
-                '{option_name} ignored as it is not supported on CMAN clusters',
-                info={'option_name': opt_name}
-            ))
+            messages.append(lib_reports.cman_ignored_option(opt_name))
 
     return parsed, messages
 
@@ -930,12 +826,8 @@ def cluster_setup_create_cluster_conf(
         output, retval = utils.run(cmd_prefix + cmd_item["cmd"])
         if retval != 0:
             if output:
-                messages.append(
-                    ReportItem.info(error_codes.COMMON_INFO, output)
-                )
-            messages.append(
-                ReportItem.error(error_codes.COMMON_ERROR, cmd_item["err"])
-            )
+                messages.append(lib_reports.common_info(output))
+            messages.append(lib_reports.common_error(cmd_item["err"]))
             conf_temp.close()
             return "", messages
     conf_temp.seek(0)
@@ -954,8 +846,16 @@ def get_local_network():
         utils.err("unable to determine network address, is interface up?")
 
 def start_cluster(argv):
+    wait = False
+    wait_timeout = None
+    if "--wait" in utils.pcs_options:
+        wait_timeout = utils.validate_wait_get_timeout(False)
+        wait = True
+
     if len(argv) > 0:
         start_cluster_nodes(argv)
+        if wait:
+            wait_for_nodes_started(argv, wait_timeout)
         return
 
     print("Starting Cluster...")
@@ -979,18 +879,87 @@ def start_cluster(argv):
     if retval != 0:
         print(output)
         utils.err("unable to start pacemaker")
+    if wait:
+        wait_for_nodes_started([], wait_timeout)
 
 def start_cluster_all():
-    start_cluster_nodes(utils.getNodesFromCorosyncConf())
+    wait = False
+    wait_timeout = None
+    if "--wait" in utils.pcs_options:
+        wait_timeout = utils.validate_wait_get_timeout(False)
+        wait = True
+
+    all_nodes = utils.getNodesFromCorosyncConf()
+    start_cluster_nodes(all_nodes)
+
+    if wait:
+        wait_for_nodes_started(all_nodes, wait_timeout)
 
 def start_cluster_nodes(nodes):
-    threads = dict()
-    for node in nodes:
-        threads[node] = NodeStartThread(node)
-    error_list = utils.run_node_threads(threads)
+    error_list = parallel_for_nodes(utils.startCluster, nodes, quiet=True)
     if error_list:
         utils.err("unable to start all nodes\n" + "\n".join(error_list))
 
+def is_node_fully_started(node_status):
+    return (
+        "online" in node_status and "pending" in node_status
+        and
+        node_status["online"] and not node_status["pending"]
+    )
+
+def wait_for_local_node_started(stop_at, interval):
+    try:
+        while True:
+            node_status = lib_pacemaker.get_local_node_status(
+                utils.cmd_runner()
+            )
+            if is_node_fully_started(node_status):
+                return 0, "Started"
+            if datetime.datetime.now() > stop_at:
+                return 1, "Waiting timeout"
+            time.sleep(interval)
+    except LibraryError as e:
+        return 1, "Unable to get node status: {0}".format(
+            "\n".join([item.message for item in e.args])
+        )
+
+def wait_for_remote_node_started(node, stop_at, interval):
+    while True:
+        code, output = utils.getPacemakerNodeStatus(node)
+        # HTTP error, permission denied or unable to auth
+        # there is no point in trying again as it won't get magically fixed
+        if code in [1, 3, 4]:
+            return 1, output
+        if code == 0:
+            try:
+                status = json.loads(output)
+                if (is_node_fully_started(status)):
+                    return 0, "Started"
+            except (ValueError, KeyError):
+                # this won't get fixed either
+                return 1, "Unable to get node status"
+        if datetime.datetime.now() > stop_at:
+            return 1, "Waiting timeout"
+        time.sleep(interval)
+
+def wait_for_nodes_started(node_list, timeout=None):
+    timeout = 60 * 15 if timeout is None else timeout
+    interval = 2
+    stop_at = datetime.datetime.now() + datetime.timedelta(seconds=timeout)
+    print("Waiting for node(s) to start...")
+    if not node_list:
+        code, output = wait_for_local_node_started(stop_at, interval)
+        if code != 0:
+            utils.err(output)
+        else:
+            print(output)
+    else:
+        error_list = parallel_for_nodes(
+            wait_for_remote_node_started, node_list, stop_at, interval
+        )
+        if error_list:
+            utils.err("unable to verify all nodes have started")
+
 def stop_cluster_all():
     stop_cluster_nodes(utils.getNodesFromCorosyncConf())
 
@@ -1004,7 +973,7 @@ def stop_cluster_nodes(nodes):
         )
 
     stopping_all = set(nodes) >= set(all_nodes)
-    if not "--force" in utils.pcs_options and not stopping_all:
+    if "--force" not in utils.pcs_options and not stopping_all:
         error_list = []
         for node in nodes:
             retval, data = utils.get_remote_quorumtool_output(node)
@@ -1042,51 +1011,14 @@ def stop_cluster_nodes(nodes):
                 + "\n".join(error_list)
             )
 
-    threads = dict()
-    for node in nodes:
-        threads[node] = NodeStopPacemakerThread(node)
-    error_list = utils.run_node_threads(threads)
+    error_list = parallel_for_nodes(utils.stopPacemaker, nodes, quiet=True)
     if error_list:
         utils.err("unable to stop all nodes\n" + "\n".join(error_list))
 
-    threads = dict()
-    for node in nodes:
-        threads[node] = NodeStopCorosyncThread(node)
-    error_list = utils.run_node_threads(threads)
+    error_list = parallel_for_nodes(utils.stopCorosync, nodes, quiet=True)
     if error_list:
         utils.err("unable to stop all nodes\n" + "\n".join(error_list))
 
-def node_standby(argv,standby=True):
-    if len(argv) > 1:
-        if standby:
-            usage.cluster(["standby"])
-        else:
-            usage.cluster(["unstandby"])
-        sys.exit(1)
-
-    nodes = utils.getNodesFromPacemaker()
-
-    if "--all" not in utils.pcs_options:
-        options_node = []
-        if argv:
-            if argv[0] not in nodes:
-                utils.err(
-                    "node '%s' does not appear to exist in configuration"
-                    % argv[0]
-                )
-            else:
-                options_node = ["-N", argv[0]]
-        if standby:
-            utils.run(["crm_standby", "-v", "on"] + options_node)
-        else:
-            utils.run(["crm_standby", "-D"] + options_node)
-    else:
-        for node in nodes:
-            if standby:
-                utils.run(["crm_standby", "-v", "on", "-N", node])
-            else:
-                utils.run(["crm_standby", "-D", "-N", node])
-
 def enable_cluster(argv):
     if len(argv) > 0:
         enable_cluster_nodes(argv)
@@ -1120,16 +1052,11 @@ def disable_cluster_nodes(nodes):
 def destroy_cluster(argv):
     if len(argv) > 0:
         # stop pacemaker and resources while cluster is still quorate
-        threads = dict()
-        for node in argv:
-            threads[node] = NodeStopPacemakerThread(node)
-        error_list = utils.run_node_threads(threads)
+        nodes = argv
+        error_list = parallel_for_nodes(utils.stopPacemaker, nodes, quiet=True)
         # proceed with destroy regardless of errors
         # destroy will stop any remaining cluster daemons
-        threads = dict()
-        for node in argv:
-            threads[node] = NodeDestroyThread(node)
-        error_list = utils.run_node_threads(threads)
+        error_list = parallel_for_nodes(utils.destroyCluster, nodes, quiet=True)
         if error_list:
             utils.err("unable to destroy cluster\n" + "\n".join(error_list))
 
@@ -1138,10 +1065,10 @@ def stop_cluster(argv):
         stop_cluster_nodes(argv)
         return
 
-    if not "--force" in utils.pcs_options:
+    if "--force" not in utils.pcs_options:
         if utils.is_rhel6():
-            output_status, retval = utils.run(["cman_tool", "status"])
-            output_nodes, retval = utils.run([
+            output_status, dummy_retval = utils.run(["cman_tool", "status"])
+            output_nodes, dummy_retval = utils.run([
                 "cman_tool", "nodes", "-F", "id,type,votes,name"
             ])
             if output_status == output_nodes:
@@ -1151,7 +1078,7 @@ def stop_cluster(argv):
                 output = output_status + "\n---Votes---\n" + output_nodes
             quorum_info = utils.parse_cman_quorum_info(output)
         else:
-            output, retval = utils.run(["corosync-quorumtool", "-p", "-s"])
+            output, dummy_retval = utils.run(["corosync-quorumtool", "-p", "-s"])
             # retval is 0 on success if node is not in partition with quorum
             # retval is 1 on error OR on success if node has quorum
             quorum_info = utils.parse_quorumtool_output(output)
@@ -1201,8 +1128,8 @@ def stop_cluster_corosync():
 
 def kill_cluster(argv):
     daemons = ["crmd", "pengine", "attrd", "lrmd", "stonithd", "cib", "pacemakerd", "corosync"]
-    output, retval = utils.run(["killall", "-9"] + daemons)
-#    if retval != 0:
+    dummy_output, dummy_retval = utils.run(["killall", "-9"] + daemons)
+#    if dummy_retval != 0:
 #        print "Error: unable to execute killall -9"
 #        print output
 #        sys.exit(1)
@@ -1252,12 +1179,6 @@ def cluster_push(argv):
     else:
         print("CIB updated")
 
-def cluster_upgrade():
-    output, retval = utils.run(["cibadmin", "--upgrade", "--force"])
-    if retval != 0:
-        utils.err("unable to upgrade cluster: %s" % output)
-    print("Cluster CIB has been upgraded to latest version")
-
 def cluster_edit(argv):
     if 'EDITOR' in os.environ:
         if len(argv) > 1:
@@ -1337,7 +1258,7 @@ def get_cib(argv):
             f = open(filename, 'w')
             output = utils.get_cib(scope)
             if output != "":
-                    f.write(output)
+                f.write(output)
             else:
                 utils.err("No data in the CIB")
         except IOError as e:
@@ -1345,7 +1266,7 @@ def get_cib(argv):
 
 def cluster_node(argv):
     if len(argv) != 2:
-        usage.cluster();
+        usage.cluster()
         sys.exit(1)
 
     if argv[0] == "add":
@@ -1353,7 +1274,7 @@ def cluster_node(argv):
     elif argv[0] in ["remove","delete"]:
         add_node = False
     else:
-        usage.cluster();
+        usage.cluster()
         sys.exit(1)
 
     node = argv[1]
@@ -1373,6 +1294,11 @@ def cluster_node(argv):
         utils.err(output)
 
     if add_node == True:
+        wait = False
+        wait_timeout = None
+        if "--start" in utils.pcs_options and "--wait" in utils.pcs_options:
+            wait_timeout = utils.validate_wait_get_timeout(False)
+            wait = True
         need_ring1_address = utils.need_ring1_address(utils.getCorosyncConf())
         if not node1 and need_ring1_address:
             utils.err(
@@ -1441,12 +1367,15 @@ def cluster_node(argv):
         if utils.is_cman_with_udpu_transport():
             print("Warning: Using udpu transport on a CMAN cluster, "
                 + "cluster restart is required to apply node addition")
+        if wait:
+            print()
+            wait_for_nodes_started([node0], wait_timeout)
     else:
         if node0 not in utils.getNodesFromCorosyncConf():
             utils.err(
                 "node '%s' does not appear to exist in configuration" % node0
             )
-        if not "--force" in utils.pcs_options:
+        if "--force" not in utils.pcs_options:
             retval, data = utils.get_remote_quorumtool_output(node0)
             if retval != 0:
                 utils.err(
@@ -1850,30 +1779,3 @@ def cluster_quorum_unblock(argv):
     utils.set_cib_property("startup-fencing", startup_fencing)
     print("Waiting for nodes cancelled")
 
-class NodeActionThread(threading.Thread):
-    def __init__(self, node):
-        super(NodeActionThread, self).__init__()
-        self.node = node
-        self.retval = 0
-        self.output = ""
-
-class NodeStartThread(NodeActionThread):
-    def run(self):
-        self.retval, self.output = utils.startCluster(self.node, quiet=True)
-
-class NodeStopPacemakerThread(NodeActionThread):
-    def run(self):
-        self.retval, self.output = utils.stopCluster(
-            self.node, quiet=True, pacemaker=True, corosync=False
-        )
-
-class NodeStopCorosyncThread(NodeActionThread):
-    def run(self):
-        self.retval, self.output = utils.stopCluster(
-            self.node, quiet=True, pacemaker=False, corosync=True
-        )
-
-class NodeDestroyThread(NodeActionThread):
-    def run(self):
-        self.retval, self.output = utils.destroyCluster(self.node, quiet=True)
-
diff --git a/pcs/common/__init__.py b/pcs/common/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/common/report_codes.py b/pcs/common/report_codes.py
new file mode 100644
index 0000000..0bc5d48
--- /dev/null
+++ b/pcs/common/report_codes.py
@@ -0,0 +1,82 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+# force cathegories
+FORCE_ACTIVE_RRP = "ACTIVE_RRP"
+FORCE_CONSTRAINT_DUPLICATE = "CONSTRAINT_DUPLICATE"
+FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE = "CONSTRAINT_MULTIINSTANCE_RESOURCE"
+FORCE_LOAD_THRESHOLD = "LOAD_THRESHOLD"
+FORCE_OPTIONS = "OPTIONS"
+FORCE_QDEVICE_MODEL = "QDEVICE_MODEL"
+SKIP_OFFLINE_NODES = "SKIP_OFFLINE_NODES"
+
+BAD_CLUSTER_STATE_FORMAT = 'BAD_CLUSTER_STATE_FORMAT'
+CIB_CANNOT_FIND_MANDATORY_SECTION = "CIB_CANNOT_FIND_MANDATORY_SECTION"
+CIB_LOAD_ERROR_BAD_FORMAT = "CIB_LOAD_ERROR_BAD_FORMAT"
+CIB_LOAD_ERROR = "CIB_LOAD_ERROR"
+CIB_LOAD_ERROR_SCOPE_MISSING = "CIB_LOAD_ERROR_SCOPE_MISSING"
+CIB_PUSH_ERROR = "CIB_PUSH_ERROR"
+CMAN_BROADCAST_ALL_RINGS = 'CMAN_BROADCAST_ALL_RINGS'
+CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED'
+CMAN_UNSUPPORTED_COMMAND = "CMAN_UNSUPPORTED_COMMAND"
+COMMON_ERROR = 'COMMON_ERROR'
+COMMON_INFO = 'COMMON_INFO'
+COROSYNC_CONFIG_ACCEPTED_BY_NODE = "COROSYNC_CONFIG_ACCEPTED_BY_NODE"
+COROSYNC_CONFIG_DISTRIBUTION_STARTED = "COROSYNC_CONFIG_DISTRIBUTION_STARTED"
+COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR = "COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR"
+COROSYNC_CONFIG_RELOADED = "COROSYNC_CONFIG_RELOADED"
+COROSYNC_CONFIG_RELOAD_ERROR = "COROSYNC_CONFIG_RELOAD_ERROR"
+COROSYNC_NOT_RUNNING_CHECK_STARTED = "COROSYNC_NOT_RUNNING_CHECK_STARTED"
+COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR = "COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR"
+COROSYNC_NOT_RUNNING_ON_NODE = "COROSYNC_NOT_RUNNING_ON_NODE"
+COROSYNC_RUNNING_ON_NODE = "COROSYNC_RUNNING_ON_NODE"
+CRM_MON_ERROR = "CRM_MON_ERROR"
+DUPLICATE_CONSTRAINTS_EXIST = "DUPLICATE_CONSTRAINTS_EXIST"
+EMPTY_RESOURCE_SET_LIST = "EMPTY_RESOURCE_SET_LIST"
+ID_ALREADY_EXISTS = 'ID_ALREADY_EXISTS'
+ID_NOT_FOUND = 'ID_NOT_FOUND'
+IGNORED_CMAN_UNSUPPORTED_OPTION = 'IGNORED_CMAN_UNSUPPORTED_OPTION'
+INVALID_ID = "INVALID_ID"
+INVALID_METADATA_FORMAT = 'INVALID_METADATA_FORMAT'
+INVALID_OPTION = "INVALID_OPTION"
+INVALID_OPTION_VALUE = "INVALID_OPTION_VALUE"
+INVALID_RESOURCE_NAME = 'INVALID_RESOURCE_NAME'
+INVALID_SCORE = "INVALID_SCORE"
+INVALID_TIMEOUT_VALUE = "INVALID_TIMEOUT_VALUE"
+MULTIPLE_SCORE_OPTIONS = "MULTIPLE_SCORE_OPTIONS"
+NODE_COMMUNICATION_ERROR = "NODE_COMMUNICATION_ERROR",
+NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED = "NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED",
+NODE_COMMUNICATION_ERROR_PERMISSION_DENIED = "NODE_COMMUNICATION_ERROR_PERMISSION_DENIED",
+NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT = "NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT",
+NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND = "NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND",
+NODE_COMMUNICATION_FINISHED = "NODE_COMMUNICATION_FINISHED"
+NODE_COMMUNICATION_NOT_CONNECTED = "NODE_COMMUNICATION_NOT_CONNECTED"
+NODE_COMMUNICATION_STARTED = "NODE_COMMUNICATION_STARTED"
+NODE_NOT_FOUND = "NODE_NOT_FOUND"
+NON_UDP_TRANSPORT_ADDR_MISMATCH = 'NON_UDP_TRANSPORT_ADDR_MISMATCH'
+PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND = "PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND"
+PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE",
+PARSE_ERROR_COROSYNC_CONF = "PARSE_ERROR_COROSYNC_CONF",
+PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE = "PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE",
+QDEVICE_ALREADY_DEFINED = "QDEVICE_ALREADY_DEFINED"
+QDEVICE_NOT_DEFINED = "QDEVICE_NOT_DEFINED"
+REQUIRED_OPTION_IS_MISSING = "REQUIRED_OPTION_IS_MISSING"
+RESOURCE_CLEANUP_ERROR = "RESOURCE_CLEANUP_ERROR"
+RESOURCE_CLEANUP_TOO_TIME_CONSUMING = 'RESOURCE_CLEANUP_TOO_TIME_CONSUMING'
+RESOURCE_DOES_NOT_EXIST = 'RESOURCE_DOES_NOT_EXIST'
+RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE = 'RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE'
+RESOURCE_WAIT_ERROR = "RESOURCE_WAIT_ERROR"
+RESOURCE_WAIT_NOT_SUPPORTED = "RESOURCE_WAIT_NOT_SUPPORTED"
+RESOURCE_WAIT_TIMED_OUT = "RESOURCE_WAIT_TIMED_OUT"
+RRP_ACTIVE_NOT_SUPPORTED = 'RRP_ACTIVE_NOT_SUPPORTED'
+RUN_EXTERNAL_PROCESS_ERROR = "RUN_EXTERNAL_PROCESS_ERROR"
+RUN_EXTERNAL_PROCESS_FINISHED = "RUN_EXTERNAL_PROCESS_FINISHED"
+RUN_EXTERNAL_PROCESS_STARTED = "RUN_EXTERNAL_PROCESS_STARTED"
+UNABLE_TO_GET_AGENT_METADATA = 'UNABLE_TO_GET_AGENT_METADATA'
+UNABLE_TO_READ_COROSYNC_CONFIG = "UNABLE_TO_READ_COROSYNC_CONFIG"
+UNKNOWN_COMMAND = 'UNKNOWN_COMMAND'
+UNSUPPORTED_RESOURCE_AGENT = 'UNSUPPORTED_RESOURCE_AGENT'
diff --git a/pcs/common/tools.py b/pcs/common/tools.py
new file mode 100644
index 0000000..7c698e8
--- /dev/null
+++ b/pcs/common/tools.py
@@ -0,0 +1,21 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+
+def simple_cache(func):
+    cache = {
+        "was_run": False,
+        "value": None
+    }
+
+    def wrapper():
+        if not cache["was_run"]:
+            cache["value"] = func()
+            cache["was_run"] = True
+        return cache["value"]
+
+    return wrapper
diff --git a/pcs/config.py b/pcs/config.py
index 1cd32f5..51de822 100644
--- a/pcs/config.py
+++ b/pcs/config.py
@@ -1,7 +1,9 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import sys
 import os
@@ -17,7 +19,6 @@ import pwd
 import grp
 import time
 
-logging.basicConfig() # clufter needs logging set before imported
 try:
     import clufter.format_manager
     import clufter.filter_manager
@@ -26,15 +27,24 @@ try:
 except ImportError:
     no_clufter = True
 
-import settings
-import utils
-import cluster
-import constraint
-import prop
-import resource
-import status
-import stonith
-import usage
+from pcs import (
+    cluster,
+    constraint,
+    prop,
+    quorum,
+    resource,
+    settings,
+    status,
+    stonith,
+    usage,
+    utils,
+)
+from pcs.lib.errors import LibraryError
+from pcs.lib.commands import quorum as lib_quorum
+import pcs.cli.constraint_colocation.command as colocation_command
+import pcs.cli.constraint_order.command as order_command
+import pcs.cli.constraint_ticket.command as ticket_command
+from pcs.cli.common.console_report import indent
 
 
 def config_cmd(argv):
@@ -84,6 +94,14 @@ def config_show(argv):
     print()
     config_show_cib()
     cluster.cluster_uidgid([], True)
+    if "--corosync_conf" in utils.pcs_options or not utils.is_rhel6():
+        print()
+        print("Quorum:")
+        try:
+            config = lib_quorum.get_config(utils.get_lib_env())
+            print("\n".join(indent(quorum.quorum_config_to_str(config))))
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
 
 def config_show_cib():
     print("Resources:")
@@ -96,9 +114,14 @@ def config_show_cib():
     print("Fencing Levels:")
     print()
     stonith.stonith_level_show()
+
+    lib = utils.get_library_wrapper()
     constraint.location_show([])
-    constraint.order_show([])
-    constraint.colocation_show([])
+    modificators = utils.get_modificators()
+    order_command.show(lib, [], modificators)
+    colocation_command.show(lib, [], modificators)
+    ticket_command.show(lib, [], modificators)
+
     print()
     del utils.pcs_options["--all"]
     print("Resources Defaults:")
@@ -240,7 +263,7 @@ def config_restore_remote(infile_name, infile_obj):
                     % node
                 )
                 continue
-        except (ValueError, NameError):
+        except (ValueError, NameError, LookupError):
             err_msgs.append("unable to determine status of the node %s" % node)
     if err_msgs:
         for msg in err_msgs:
diff --git a/pcs/constraint.py b/pcs/constraint.py
index 8c027bc..95218d0 100644
--- a/pcs/constraint.py
+++ b/pcs/constraint.py
@@ -1,27 +1,44 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import sys
 import xml.dom.minidom
-from xml.dom.minidom import parseString
 from collections import defaultdict
+from xml.dom.minidom import parseString
 
-import usage
-import utils
-import resource
-import rule as rule_utils
-
+import pcs.cli.constraint_colocation.command as colocation_command
+import pcs.cli.constraint_order.command as order_command
+from pcs import (
+    rule as rule_utils,
+    usage,
+    utils,
+)
+from pcs.cli import (
+    constraint_colocation,
+    constraint_order,
+)
+from pcs.cli.constraint_ticket import command as ticket_command
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.lib.cib.constraint import resource_set
+from pcs.lib.cib.constraint.order import ATTRIB as order_attrib
+from pcs.lib.errors import LibraryError
+
+
+OPTIONS_ACTION = resource_set.ATTRIB["action"]
 
-OPTIONS_ACTION = ("start", "promote", "demote", "stop")
 DEFAULT_ACTION = "start"
-OPTIONS_ROLE = ("Stopped", "Started", "Master", "Slave")
 DEFAULT_ROLE = "Started"
-OPTIONS_KIND = ("Optional", "Mandatory", "Serialize")
-OPTIONS_SYMMETRICAL = ("true", "false")
+
+OPTIONS_SYMMETRICAL = order_attrib["symmetrical"]
+OPTIONS_KIND = order_attrib["kind"]
 
 def constraint_cmd(argv):
+    lib = utils.get_library_wrapper()
+    modificators = utils.get_modificators()
     if len(argv) == 0:
         argv = ["list"]
 
@@ -55,13 +72,36 @@ def constraint_cmd(argv):
             sub_cmd2 = argv.pop(0)
 
         if (sub_cmd2 == "set"):
-            order_set(argv)
+            try:
+                order_command.create_with_set(lib, argv, modificators)
+            except CmdLineInputError as e:
+                utils.exit_on_cmdline_input_errror(e, "constraint", 'order set')
+            except LibraryError as e:
+                utils.process_library_reports(e.args)
         elif (sub_cmd2 in ["remove","delete"]):
             order_rm(argv)
         elif (sub_cmd2 == "show"):
-            order_show(argv)
+            order_command.show(lib, argv, modificators)
         else:
             order_start([sub_cmd2] + argv)
+    elif sub_cmd == "ticket":
+        usage_name = "ticket"
+        try:
+            command_map = {
+                "set": ticket_command.create_with_set,
+                "add": ticket_command.add,
+                "show": ticket_command.show,
+            }
+            if argv[0] not in command_map:
+                raise CmdLineInputError()
+            usage_name = "ticket "+argv[0]
+
+            command_map[argv[0]](lib, argv[1:], modificators)
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
+        except CmdLineInputError as e:
+            utils.exit_on_cmdline_input_errror(e, "constraint", usage_name)
+
     elif (sub_cmd == "colocation"):
         if (len(argv) == 0):
             sub_cmd2 = "show"
@@ -73,9 +113,15 @@ def constraint_cmd(argv):
         elif (sub_cmd2 in ["remove","delete"]):
             colocation_rm(argv)
         elif (sub_cmd2 == "set"):
-            colocation_set(argv)
+            try:
+
+                colocation_command.create_with_set(lib, argv, modificators)
+            except LibraryError as e:
+                utils.process_library_reports(e.args)
+            except CmdLineInputError as e:
+                utils.exit_on_cmdline_input_errror(e, "constraint", "colocation set")
         elif (sub_cmd2 == "show"):
-            colocation_show(argv)
+            colocation_command.show(lib, argv, modificators)
         else:
             usage.constraint()
             sys.exit(1)
@@ -83,8 +129,9 @@ def constraint_cmd(argv):
         constraint_rm(argv)
     elif (sub_cmd == "show" or sub_cmd == "list"):
         location_show(argv)
-        order_show(argv)
-        colocation_show(argv)
+        order_command.show(lib, argv, modificators)
+        colocation_command.show(lib, argv, modificators)
+        ticket_command.show(lib, argv, modificators)
     elif (sub_cmd == "ref"):
         constraint_ref(argv)
     elif (sub_cmd == "rule"):
@@ -93,39 +140,7 @@ def constraint_cmd(argv):
         usage.constraint()
         sys.exit(1)
 
-def colocation_show(argv):
-    if "--full" in utils.pcs_options:
-        showDetail = True
-    else:
-        showDetail = False
-
-    (dom,constraintsElement) = getCurrentConstraints()
 
-    resource_colocation_sets = []
-    print("Colocation Constraints:")
-    for co_loc in constraintsElement.getElementsByTagName('rsc_colocation'):
-        if not co_loc.getAttribute("rsc"):
-            resource_colocation_sets.append(co_loc)
-        else:
-            print("  " + colocation_el_to_string(co_loc, showDetail))
-    print_sets(resource_colocation_sets, showDetail)
-
-def colocation_el_to_string(co_loc, showDetail=False):
-    co_resource1 = co_loc.getAttribute("rsc")
-    co_resource2 = co_loc.getAttribute("with-rsc")
-    co_id = co_loc.getAttribute("id")
-    co_score = co_loc.getAttribute("score")
-    score_text = "(score:" + co_score + ")"
-    attrs_list = [
-        "(%s:%s)" % (attr[0], attr[1])
-        for attr in co_loc.attributes.items()
-        if attr[0] not in ("rsc", "with-rsc", "id", "score")
-    ]
-    if showDetail:
-        attrs_list.append("(id:%s)" % co_id)
-    return " ".join(
-        [co_resource1, "with", co_resource2, score_text] + attrs_list
-    )
 
 def colocation_rm(argv):
     elementFound = False
@@ -251,7 +266,6 @@ def colocation_add(argv):
         role2 = DEFAULT_ROLE
     if role2 != "" and role1 == "":
         role1 = DEFAULT_ROLE
-
     element = dom.createElement("rsc_colocation")
     element.setAttribute("rsc",resource1)
     element.setAttribute("with-rsc",resource2)
@@ -268,7 +282,10 @@ def colocation_add(argv):
             utils.err(
                 "duplicate constraint already exists, use --force to override\n"
                 + "\n".join([
-                    "  " + colocation_el_to_string(dup, True)
+                    "  " + constraint_colocation.console_report.constraint_plain(
+                            {"options": dict(dup.attributes.items())},
+                            True
+                        )
                     for dup in duplicates
                 ])
             )
@@ -293,308 +310,6 @@ def colocation_find_duplicates(dom, constraint_el):
             and normalized_el == normalize(other_el)
     ]
 
-def colocation_set(argv):
-    setoptions = []
-    for i in range(len(argv)):
-        if argv[i] == "setoptions":
-            setoptions = argv[i+1:]
-            argv[i:] = []
-            break
-
-    argv.insert(0, "set")
-    resource_sets = set_args_into_array(argv)
-    if not check_empty_resource_sets(resource_sets):
-        usage.constraint(["colocation set"])
-        sys.exit(1)
-    cib, constraints = getCurrentConstraints(utils.get_cib_dom())
-
-    attributes = []
-    score_options = ("score", "score-attribute", "score-attribute-mangle")
-    score_specified = False
-    id_specified = False
-    for opt in setoptions:
-        if "=" not in opt:
-            utils.err("missing value of '%s' option" % opt)
-        name, value = opt.split("=", 1)
-        if name == "id":
-            id_valid, id_error = utils.validate_xml_id(value, 'constraint id')
-            if not id_valid:
-                utils.err(id_error)
-            if utils.does_id_exist(cib, value):
-                utils.err(
-                    "id '%s' is already in use, please specify another one"
-                    % value
-                )
-            id_specified = True
-            attributes.append((name, value))
-        elif name in score_options:
-            if score_specified:
-                utils.err("you cannot specify multiple score options")
-            if name == "score" and not utils.is_score(value):
-                utils.err(
-                    "invalid score '%s', use integer or INFINITY or -INFINITY"
-                    % value
-                )
-            score_specified = True
-            attributes.append((name, value))
-        else:
-            utils.err(
-                "invalid option '%s', allowed options are: %s"
-                % (name, ", ".join(score_options + ("id",)))
-            )
-
-    if not score_specified:
-        attributes.append(("score", "INFINITY"))
-    if not id_specified:
-        colocation_id = "pcs_rsc_colocation"
-        for a in argv:
-            if "=" not in a:
-                colocation_id += "_" + a
-        attributes.append(("id", utils.find_unique_id(cib, colocation_id)))
-
-    rsc_colocation = cib.createElement("rsc_colocation")
-    for name, value in attributes:
-        rsc_colocation.setAttribute(name, value)
-    set_add_resource_sets(rsc_colocation, resource_sets, cib)
-    constraints.appendChild(rsc_colocation)
-    utils.replace_cib_configuration(cib)
-
-def order_show(argv):
-    if "--full" in utils.pcs_options:
-        showDetail = True
-    else:
-        showDetail = False
-
-    (dom,constraintsElement) = getCurrentConstraints()
-
-    resource_order_sets = []
-    print("Ordering Constraints:")
-    for ord_loc in constraintsElement.getElementsByTagName('rsc_order'):
-        if not ord_loc.getAttribute("first"):
-            resource_order_sets.append(ord_loc)
-        else:
-            print("  " + order_el_to_string(ord_loc, showDetail))
-    print_sets(resource_order_sets,showDetail)
-
-def order_el_to_string(ord_loc, showDetail=False):
-    oc_resource1 = ord_loc.getAttribute("first")
-    oc_resource2 = ord_loc.getAttribute("then")
-    first_action = ord_loc.getAttribute("first-action")
-    then_action = ord_loc.getAttribute("then-action")
-    oc_id = ord_loc.getAttribute("id")
-    oc_score = ord_loc.getAttribute("score")
-    oc_kind = ord_loc.getAttribute("kind")
-    oc_sym = ""
-    oc_id_out = ""
-    oc_options = ""
-    if (
-        ord_loc.getAttribute("symmetrical")
-        and
-        not utils.is_cib_true(ord_loc.getAttribute("symmetrical"))
-    ):
-        oc_sym = "(non-symmetrical)"
-    if oc_kind != "":
-        score_text = "(kind:" + oc_kind + ")"
-    elif oc_kind == "" and oc_score == "":
-        score_text = "(kind:Mandatory)"
-    else:
-        score_text = "(score:" + oc_score + ")"
-    if showDetail:
-        oc_id_out = "(id:"+oc_id+")"
-    already_processed_attrs = (
-        "first", "then", "first-action", "then-action", "id", "score", "kind",
-        "symmetrical"
-    )
-    oc_options = " ".join([
-        "{0}={1}".format(name, value)
-        for name, value in ord_loc.attributes.items()
-        if name not in already_processed_attrs
-    ])
-    if oc_options:
-        oc_options = "(Options: " + oc_options + ")"
-    return " ".join([arg for arg in [
-        first_action, oc_resource1, "then", then_action, oc_resource2,
-        score_text, oc_sym, oc_options, oc_id_out
-    ] if arg])
-
-def print_sets(sets,showDetail):
-    if len(sets) != 0:
-        print("  Resource Sets:")
-        for ro in sets:
-            print("    " + set_constraint_el_to_string(ro, showDetail))
-
-def set_constraint_el_to_string(constraint_el, showDetail=False):
-    set_list = []
-    for set_el in constraint_el.getElementsByTagName("resource_set"):
-        set_list.append("set " + " ".join(
-            [
-                res_el.getAttribute("id")
-                for res_el in set_el.getElementsByTagName("resource_ref")
-            ]
-            +
-            utils.dom_attrs_to_list(set_el, showDetail)
-        ))
-    constraint_opts = utils.dom_attrs_to_list(constraint_el, False)
-    if constraint_opts:
-        constraint_opts.insert(0, "setoptions")
-    if showDetail:
-        constraint_opts.append("(id:%s)" % constraint_el.getAttribute("id"))
-    return " ".join(set_list + constraint_opts)
-
-def set_args_into_array(argv):
-    all_sets = []
-    current_set = None
-    for elem in argv:
-        if "set" == elem:
-            if current_set is not None:
-                all_sets.append(current_set)
-            current_set = []
-        else:
-            current_set.append(elem)
-    if current_set is not None:
-        all_sets.append(current_set)
-    return all_sets
-
-def check_empty_resource_sets(sets):
-    if not sets:
-        return False
-    return all(sets)
-
-def set_add_resource_sets(elem, sets, cib):
-    allowed_options = {
-        "sequential": ("true", "false"),
-        "require-all": ("true", "false"),
-        "action" : OPTIONS_ACTION,
-        "role" : OPTIONS_ROLE,
-    }
-
-    for o_set in sets:
-        set_id = "pcs_rsc_set"
-        res_set = cib.createElement("resource_set")
-        elem.appendChild(res_set)
-        for opts in o_set:
-            if opts.find("=") != -1:
-                key,val = opts.split("=")
-                if key not in allowed_options:
-                    utils.err(
-                        "invalid option '%s', allowed options are: %s"
-                        % (key, ", ".join(allowed_options.keys()))
-                    )
-                if val not in allowed_options[key]:
-                    utils.err(
-                        "invalid value '%s' of option '%s', allowed values are: %s"
-                        % (val, key, ", ".join(allowed_options[key]))
-                    )
-                res_set.setAttribute(key, val)
-            else:
-                res_valid, res_error, correct_id \
-                    = utils.validate_constraint_resource(cib, opts)
-                if "--autocorrect" in utils.pcs_options and correct_id:
-                    opts = correct_id
-                elif not res_valid:
-                    utils.err(res_error)
-                se = cib.createElement("resource_ref")
-                res_set.appendChild(se)
-                se.setAttribute("id", opts)
-                set_id = set_id + "_" + opts
-        res_set.setAttribute("id", utils.find_unique_id(cib, set_id))
-    if "--force" not in utils.pcs_options:
-        duplicates = set_constraint_find_duplicates(cib, elem)
-        if duplicates:
-            utils.err(
-                "duplicate constraint already exists, use --force to override\n"
-                + "\n".join([
-                    "  " + set_constraint_el_to_string(dup, True)
-                    for dup in duplicates
-                ])
-            )
-
-def set_constraint_find_duplicates(dom, constraint_el):
-    def normalize(constraint_el):
-        return [
-            [
-                ref_el.getAttribute("id")
-                for ref_el in set_el.getElementsByTagName("resource_ref")
-            ]
-            for set_el in constraint_el.getElementsByTagName("resource_set")
-        ]
-
-    normalized_el = normalize(constraint_el)
-    return [
-        other_el
-        for other_el in dom.getElementsByTagName(constraint_el.tagName)
-        if other_el.getElementsByTagName("resource_set")
-            and constraint_el is not other_el
-            and normalized_el == normalize(other_el)
-    ]
-
-def order_set(argv):
-    setoptions = []
-    for i in range(len(argv)):
-        if argv[i] == "setoptions":
-            setoptions = argv[i+1:]
-            argv[i:] = []
-            break
-
-    argv.insert(0, "set")
-    resource_sets = set_args_into_array(argv)
-    if not check_empty_resource_sets(resource_sets):
-        usage.constraint(["order set"])
-        sys.exit(1)
-    cib, constraints = getCurrentConstraints(utils.get_cib_dom())
-
-    attributes = []
-    id_specified = False
-    for opt in setoptions:
-        if "=" not in opt:
-            utils.err("missing value of '%s' option" % opt)
-        name, value = opt.split("=", 1)
-        if name == "id":
-            id_valid, id_error = utils.validate_xml_id(value, 'constraint id')
-            if not id_valid:
-                utils.err(id_error)
-            if utils.does_id_exist(cib, value):
-                utils.err(
-                    "id '%s' is already in use, please specify another one"
-                    % value
-                )
-            id_specified = True
-            attributes.append((name, value))
-        elif name == "kind":
-            normalized_value = value.lower().capitalize()
-            if normalized_value not in OPTIONS_KIND:
-                utils.err(
-                    "invalid kind value '%s', allowed values are: %s"
-                    % (value, ", ".join(OPTIONS_KIND))
-                )
-            attributes.append((name, normalized_value))
-        elif name == "symmetrical":
-            if value.lower() not in OPTIONS_SYMMETRICAL:
-                utils.err(
-                    "invalid symmetrical value '%s', allowed values are: %s"
-                    % (value, ", ".join(OPTIONS_SYMMETRICAL))
-                )
-            attributes.append((name, value.lower()))
-        else:
-            utils.err(
-                "invalid option '%s', allowed options are: %s"
-                % (name, "kind, symmetrical, id")
-            )
-
-    if not id_specified:
-        order_id = "pcs_rsc_order"
-        for a in argv:
-            if "=" not in a:
-                order_id += "_" + a
-        attributes.append(("id", utils.find_unique_id(cib, order_id)))
-
-    rsc_order = cib.createElement("rsc_order")
-    for name, value in attributes:
-        rsc_order.setAttribute(name, value)
-    set_add_resource_sets(rsc_order, resource_sets, cib)
-    constraints.appendChild(rsc_order)
-    utils.replace_cib_configuration(cib)
-
 def order_rm(argv):
     if len(argv) == 0:
         usage.constraint()
@@ -639,22 +354,22 @@ def order_start(argv):
     then_action = DEFAULT_ACTION
     action = argv[0]
     if action in OPTIONS_ACTION:
-            first_action = action
-            argv.pop(0)
+        first_action = action
+        argv.pop(0)
 
     resource1 = argv.pop(0)
     if argv.pop(0) != "then":
         usage.constraint()
         sys.exit(1)
-    
+
     if len(argv) == 0:
         usage.constraint()
         sys.exit(1)
 
     action = argv[0]
     if action in OPTIONS_ACTION:
-            then_action = action
-            argv.pop(0)
+        then_action = action
+        argv.pop(0)
 
     if len(argv) == 0:
         usage.constraint()
@@ -763,7 +478,10 @@ def order_add(argv,returnElementOnly=False):
             utils.err(
                 "duplicate constraint already exists, use --force to override\n"
                 + "\n".join([
-                    "  " + order_el_to_string(dup, True) for dup in duplicates
+                    "  " + constraint_order.console_report.constraint_plain(
+                            {"options": dict(dup.attributes.items())},
+                            True
+                        ) for dup in duplicates
                 ])
             )
     print(
@@ -810,7 +528,7 @@ def location_show(argv):
     else:
         valid_noderes = []
 
-    (dom,constraintsElement) = getCurrentConstraints()
+    (dummy_dom,constraintsElement) = getCurrentConstraints()
     nodehashon = {}
     nodehashoff = {}
     rschashon = {}
@@ -835,7 +553,7 @@ def location_show(argv):
 # NEED TO FIX FOR GROUP LOCATION CONSTRAINTS (where there are children of
 # rsc_location)
         if lc_score == "":
-            lc_score = "0";
+            lc_score = "0"
 
         if lc_score == "INFINITY":
             positive = True
@@ -940,7 +658,7 @@ def show_location_rules(ruleshash,showDetail,noheader=False):
             if rule.parentNode.getAttribute("resource-discovery"):
                 constraint_options[constraint_id].append("resource-discovery=%s" % rule.parentNode.getAttribute("resource-discovery"))
 
-        for constraint_id in constrainthash.keys():
+        for constraint_id in sorted(constrainthash.keys()):
             if constraint_id in constraint_options and len(constraint_options[constraint_id]) > 0:
                 constraint_option_info = " (" + " ".join(constraint_options[constraint_id]) + ")"
             else:
@@ -984,10 +702,10 @@ def location_prefer(argv):
                     score = "-" + score
             node = nodeconf_a[0]
         location_add(["location-" +rsc+"-"+node+"-"+score,rsc,node,score])
-        
+
 
 def location_add(argv,rm=False):
-    if len(argv) < 4 and (rm == False or len(argv) < 1): 
+    if len(argv) < 4 and (rm == False or len(argv) < 1):
         usage.constraint()
         sys.exit(1)
 
@@ -1064,7 +782,7 @@ def location_rule(argv):
     if len(argv) < 3:
         usage.constraint(["location", "rule"])
         sys.exit(1)
-    
+
     res_name = argv.pop(0)
     resource_valid, resource_error, correct_id \
         = utils.validate_constraint_resource(utils.get_cib_dom(), res_name)
@@ -1186,7 +904,7 @@ def constraint_rm(argv,returnStatus=False, constraintsElement=None, passed_dom=N
         use_cibadmin = True
     else:
         use_cibadmin = False
-        
+
     for co in constraintsElement.childNodes[:]:
         if co.nodeType != xml.dom.Node.ELEMENT_NODE:
             continue
@@ -1230,7 +948,7 @@ def constraint_ref(argv):
         else:
             for constraint in constraints:
                 print("  " + constraint)
-            for constraint in set_constraints:
+            for constraint in sorted(set_constraints):
                 print("  " + constraint)
 
 def remove_constraints_containing(resource_id,output=False,constraints_element = None, passed_dom=None):
@@ -1293,6 +1011,7 @@ def find_constraints_containing(resource_id, passed_dom=None):
     myConstraints = constraints.getElementsByTagName("rsc_colocation")
     myConstraints += constraints.getElementsByTagName("rsc_location")
     myConstraints += constraints.getElementsByTagName("rsc_order")
+    myConstraints += constraints.getElementsByTagName("rsc_ticket")
     attr_to_match = ["rsc", "first", "then", "with-rsc", "first", "then"]
     for c in myConstraints:
         for attr in attr_to_match:
@@ -1359,7 +1078,6 @@ def constraint_rule(argv):
 
 
     constraint_id = None
-    rule_id = None
 
     if command == "add":
         constraint_id = argv.pop(0)
@@ -1382,7 +1100,6 @@ def constraint_rule(argv):
         constraints = cib.find('.//constraints')
         loc_cons = cib.findall(str('.//rsc_location'))
 
-        rules = cib.findall(str('.//rule'))
         for loc_con in loc_cons:
             for rule in loc_con:
                 if rule.get("id") == temp_id:
diff --git a/pcs/error_codes.py b/pcs/error_codes.py
deleted file mode 100644
index 9c08f1e..0000000
--- a/pcs/error_codes.py
+++ /dev/null
@@ -1,23 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-ACL_ROLE_ALREADY_EXISTS = 'ACL_ROLE_ALREADY_EXISTS'
-ACL_ROLE_NOT_FOUND = 'ACL_ROLE_NOT_FOUND'
-BAD_ACL_PERMISSION = 'BAD_ACL_PERMISSION'
-BAD_ACL_SCOPE_TYPE = 'BAD_ACL_SCOPE_TYPE'
-CMAN_BROADCAST_ALL_RINGS = 'CMAN_BROADCAST_ALL_RINGS'
-CMAN_UDPU_RESTART_REQUIRED = 'CMAN_UDPU_RESTART_REQUIRED'
-COMMON_ERROR = 'COMMON_ERROR'
-COMMON_INFO = 'COMMON_INFO'
-ID_ALREADY_EXISTS = 'ID_ALREADY_EXISTS'
-ID_IS_NOT_VALID = 'ID_IS_NOT_VALID'
-ID_NOT_FOUND = 'ID_NOT_FOUND'
-IGNORED_CMAN_UNSUPPORTED_OPTION = 'IGNORED_CMAN_UNSUPPORTED_OPTION'
-INVALID_OPTION_VALUE = 'INVALID_OPTION_VALUE'
-NON_UDP_TRANSPORT_ADDR_MISMATCH = 'NON_UDP_TRANSPORT_ADDR_MISMATCH'
-RRP_ACTIVE_NOT_SUPPORTED = 'RRP_ACTIVE_NOT_SUPPORTED'
-UNKNOWN_COMMAND = 'UNKNOWN_COMMAND'
-UNKNOWN_RRP_MODE = 'UNKNOWN_RRP_MODE'
-UNKNOWN_TRANSPORT = 'UNKNOWN_TRANSPORT'
diff --git a/pcs/lib/__init__.py b/pcs/lib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/cib/__init__.py b/pcs/lib/cib/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/cib/acl.py b/pcs/lib/cib/acl.py
new file mode 100644
index 0000000..b4bc279
--- /dev/null
+++ b/pcs/lib/cib/acl.py
@@ -0,0 +1,104 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+from pcs.lib.cib.tools import (
+    check_new_id_applicable,
+    does_id_exist,
+    find_unique_id,
+    get_acls,
+)
+
+class AclRoleNotFound(LibraryError):
+    pass
+
+def __validate_permissions(tree, permission_info_list):
+    report_items = []
+    allowed_permissions = ["read", "write", "deny"]
+    allowed_scopes = ["xpath", "id"]
+    for permission, scope_type, scope in permission_info_list:
+        if not permission in allowed_permissions:
+            report_items.append(reports.invalid_option_value(
+                "permission",
+                permission,
+                allowed_permissions
+            ))
+
+        if not scope_type in allowed_scopes:
+            report_items.append(reports.invalid_option_value(
+                "scope type",
+                scope_type,
+                allowed_scopes
+            ))
+
+        if scope_type == 'id' and not does_id_exist(tree, scope):
+            report_items.append(reports.id_not_found(scope, "id"))
+
+    if report_items:
+        raise LibraryError(*report_items)
+
+def __find_role(tree, role_id):
+    role = tree.find('.//acl_role[@id="{0}"]'.format(role_id))
+    if role is not None:
+        return role
+    raise AclRoleNotFound(reports.id_not_found(role_id, "role"))
+
+def create_role(tree, role_id, description=""):
+    """
+    role_id id of desired role
+    description role description
+    """
+    check_new_id_applicable(tree, "ACL role", role_id)
+    role = etree.SubElement(get_acls(tree), "acl_role", id=role_id)
+    if description:
+        role.set("description", description)
+
+def provide_role(tree, role_id):
+    """
+    role_id id of desired role
+    description role description
+    """
+    try:
+        __find_role(tree, role_id)
+    except AclRoleNotFound:
+        create_role(tree, role_id)
+
+def add_permissions_to_role(tree, role_id, permission_info_list):
+    """
+    tree etree node
+    role_id value of atribute id, which exists in dom
+    permission_info_list list of tuples,
+        each contains (permission, scope_type, scope)
+    """
+    __validate_permissions(tree, permission_info_list)
+
+    area_type_attribute_map = {
+        'xpath': 'xpath',
+        'id': 'reference',
+    }
+    for permission, scope_type, scope in permission_info_list:
+        perm = etree.SubElement(__find_role(tree, role_id), "acl_permission")
+        perm.set(
+            "id",
+            find_unique_id(tree, "{0}-{1}".format(role_id, permission))
+        )
+        perm.set("kind", permission)
+        perm.set(area_type_attribute_map[scope_type], scope)
+
+def remove_permissions_referencing(tree, reference):
+    xpath = './/acl_permission[@reference="{0}"]'.format(reference)
+    for permission in tree.findall(xpath):
+        permission.getparent().remove(permission)
+
+def dom_remove_permissions_referencing(dom, reference):
+    # TODO: remove once we go fully lxml
+    for permission in dom.getElementsByTagName("acl_permission"):
+        if permission.getAttribute("reference") == reference:
+            permission.parentNode.removeChild(permission)
diff --git a/pcs/lib/cib/constraint/__init__.py b/pcs/lib/cib/constraint/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/cib/constraint/colocation.py b/pcs/lib/cib/constraint/colocation.py
new file mode 100644
index 0000000..9dd423a
--- /dev/null
+++ b/pcs/lib/cib/constraint/colocation.py
@@ -0,0 +1,40 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from functools import partial
+
+from pcs.lib import reports
+from pcs.lib.cib.constraint import constraint
+from pcs.lib.cib.tools import check_new_id_applicable
+from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker_values import is_score_value, SCORE_INFINITY
+
+TAG_NAME = 'rsc_colocation'
+DESCRIPTION = "constraint id"
+SCORE_NAMES = ("score", "score-attribute", "score-attribute-mangle")
+
+def prepare_options_with_set(cib, options, resource_set_list):
+    options = constraint.prepare_options(
+        tuple(SCORE_NAMES),
+        options,
+        partial(constraint.create_id, cib, TAG_NAME, resource_set_list),
+        partial(check_new_id_applicable, cib, DESCRIPTION),
+    )
+
+    if "score" in options and not is_score_value(options["score"]):
+        raise LibraryError(reports.invalid_score(options["score"]))
+
+    score_attrs_count = len([
+        name for name in options.keys() if name in SCORE_NAMES
+    ])
+    if score_attrs_count > 1:
+        raise LibraryError(reports.multiple_score_options())
+
+    if score_attrs_count == 0:
+        options["score"] = SCORE_INFINITY
+
+    return options
diff --git a/pcs/lib/cib/constraint/constraint.py b/pcs/lib/cib/constraint/constraint.py
new file mode 100644
index 0000000..68939b2
--- /dev/null
+++ b/pcs/lib/cib/constraint/constraint.py
@@ -0,0 +1,139 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib import reports
+from pcs.lib.cib import resource
+from pcs.lib.cib.constraint import resource_set
+from pcs.lib.cib.tools import export_attributes, find_unique_id, find_parent
+from pcs.lib.errors import LibraryError, ReportItemSeverity
+
+
+def _validate_attrib_names(attrib_names, options):
+    for option_name in options.keys():
+        if option_name not in attrib_names:
+            raise LibraryError(
+                reports.invalid_option(option_name, attrib_names, None)
+            )
+
+def find_valid_resource_id(
+    report_processor, cib, can_repair_to_clone, in_clone_allowed, id
+):
+    resource_element = resource.find_by_id(cib, id)
+
+    if(resource_element is None):
+        raise LibraryError(reports.resource_does_not_exist(id))
+
+    if resource_element.tag in resource.TAGS_CLONE:
+        return resource_element.attrib["id"]
+
+    clone = find_parent(resource_element, resource.TAGS_CLONE)
+    if clone is None:
+        return resource_element.attrib["id"]
+
+    if can_repair_to_clone:
+        #This is a workaround for the web UI; the console should not use it,
+        #so we do not warn about it.
+        return clone.attrib["id"]
+
+    if in_clone_allowed:
+        report_processor.process(
+            reports.resource_for_constraint_is_multiinstance(
+                resource_element.attrib["id"],
+                clone.tag,
+                clone.attrib["id"],
+                ReportItemSeverity.WARNING,
+            )
+        )
+        return resource_element.attrib["id"]
+
+    raise LibraryError(reports.resource_for_constraint_is_multiinstance(
+        resource_element.attrib["id"],
+        clone.tag,
+        clone.attrib["id"],
+        ReportItemSeverity.ERROR,
+        #Repairing to a clone is a workaround for the web UI, so we include
+        #information about only one forceable possibility.
+        forceable=report_codes.FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE
+    ))
+
+def prepare_options(attrib_names, options, create_id, validate_id):
+    _validate_attrib_names(attrib_names+("id",), options)
+    options = options.copy()
+
+    if "id" not in options:
+        options["id"] = create_id()
+    else:
+        validate_id(options["id"])
+    return options
+
+def export_with_set(element):
+    return {
+        "resource_sets": [
+            resource_set.export(resource_set_item)
+            for resource_set_item in element.findall(".//resource_set")
+        ],
+        "options": export_attributes(element),
+    }
+
+def export_plain(element):
+    return {"options": export_attributes(element)}
+
+def create_id(cib, type_prefix, resource_set_list):
+    id = "pcs_" +type_prefix +"".join([
+        "_set_"+"_".join(id_set)
+        for id_set in resource_set.extract_id_set_list(resource_set_list)
+    ])
+    return find_unique_id(cib, id)
+
+def have_duplicate_resource_sets(element, other_element):
+    get_id_set_list = lambda element: [
+        resource_set.get_resource_id_set_list(resource_set_item)
+        for resource_set_item in element.findall(".//resource_set")
+    ]
+    return get_id_set_list(element) == get_id_set_list(other_element)
+
+def check_is_without_duplication(
+    report_processor,
+    constraint_section, element, are_duplicate, export_element,
+    duplication_alowed=False
+):
+    duplicate_element_list = [
+        duplicate_element
+        for duplicate_element in constraint_section.findall(".//"+element.tag)
+        if(
+            element is not duplicate_element
+            and
+            are_duplicate(element, duplicate_element)
+        )
+    ]
+    if not duplicate_element_list:
+        return
+
+    report_processor.process(reports.duplicate_constraints_exist(
+        element.tag,
+        [
+            export_element(duplicate_element)
+            for duplicate_element in duplicate_element_list
+        ],
+        ReportItemSeverity.WARNING if duplication_alowed
+            else ReportItemSeverity.ERROR,
+        forceable=None if duplication_alowed
+            else report_codes.FORCE_CONSTRAINT_DUPLICATE,
+    ))
+
+def create_with_set(constraint_section, tag_name, options, resource_set_list):
+    if not resource_set_list:
+        raise LibraryError(reports.empty_resource_set_list())
+    element = etree.SubElement(constraint_section, tag_name)
+    element.attrib.update(options)
+    for resource_set_item in resource_set_list:
+        resource_set.create(element, resource_set_item)
+    return element
diff --git a/pcs/lib/cib/constraint/order.py b/pcs/lib/cib/constraint/order.py
new file mode 100644
index 0000000..7823bcd
--- /dev/null
+++ b/pcs/lib/cib/constraint/order.py
@@ -0,0 +1,53 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from functools import partial
+
+from pcs.lib import reports
+from pcs.lib.cib.constraint import constraint
+from pcs.lib.cib.tools import check_new_id_applicable
+from pcs.lib.errors import LibraryError
+
+
+TAG_NAME = "rsc_order"
+DESCRIPTION = "constraint id"
+ATTRIB = {
+    "symmetrical": ("true", "false"),
+    "kind": ("Optional", "Mandatory", "Serialize"),
+}
+
+def prepare_options_with_set(cib, options, resource_set_list):
+    options = constraint.prepare_options(
+        tuple(ATTRIB.keys()),
+        options,
+        create_id=partial(
+            constraint.create_id, cib, TAG_NAME, resource_set_list
+        ),
+        validate_id=partial(check_new_id_applicable, cib, DESCRIPTION),
+    )
+
+    report_items = []
+    if "kind" in options:
+        kind = options["kind"].lower().capitalize()
+        if kind not in ATTRIB["kind"]:
+            report_items.append(reports.invalid_option_value(
+                "kind", options["kind"], ATTRIB["kind"]
+            ))
+        options["kind"] = kind
+
+    if "symmetrical" in options:
+        symmetrical = options["symmetrical"].lower()
+        if symmetrical not in ATTRIB["symmetrical"]:
+            report_items.append(reports.invalid_option_value(
+                "symmetrical", options["symmetrical"], ATTRIB["symmetrical"]
+            ))
+        options["symmetrical"] = symmetrical
+
+    if report_items:
+        raise LibraryError(*report_items)
+
+    return options
diff --git a/pcs/lib/cib/constraint/resource_set.py b/pcs/lib/cib/constraint/resource_set.py
new file mode 100644
index 0000000..f01edf1
--- /dev/null
+++ b/pcs/lib/cib/constraint/resource_set.py
@@ -0,0 +1,74 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib import reports
+from pcs.lib.cib.tools import (
+    find_unique_id,
+    export_attributes,
+)
+from pcs.lib.errors import LibraryError
+
+ATTRIB = {
+    "sequential": ("true", "false"),
+    "require-all":("true", "false"),
+    "action" : ("start", "promote", "demote", "stop"),
+    "role" : ("Stopped", "Started", "Master", "Slave"),
+}
+
+def prepare_set(find_valid_id, resource_set):
+    """return resource_set with corrected ids"""
+    validate_options(resource_set["options"])
+    return {
+        "ids": [find_valid_id(id) for id in resource_set["ids"]],
+        "options": resource_set["options"]
+    }
+
+def validate_options(options):
+    #Pacemaker currently does not check that attributes are meaningful for a
+    #concrete constraint type, so we accept all of these attributes.
+    for name, value in options.items():
+        if name not in ATTRIB:
+            raise LibraryError(
+                reports.invalid_option(name, list(ATTRIB.keys()), None)
+            )
+        if value not in ATTRIB[name]:
+            raise LibraryError(
+                reports.invalid_option_value(name, value, ATTRIB[name])
+            )
+
+def extract_id_set_list(resource_set_list):
+    return [resource_set["ids"] for resource_set in resource_set_list]
+
+def create(parent, resource_set):
+    """
+    parent - lxml element for append new resource_set
+    """
+    element = etree.SubElement(parent, "resource_set")
+    element.attrib.update(resource_set["options"])
+    element.attrib["id"] = find_unique_id(
+        parent.getroottree(),
+        "pcs_rsc_set_{0}".format("_".join(resource_set["ids"]))
+    )
+
+    for id in resource_set["ids"]:
+        etree.SubElement(element, "resource_ref").attrib["id"] = id
+
+    return element
+
+def get_resource_id_set_list(element):
+    return [
+        resource_ref_element.attrib["id"]
+        for resource_ref_element in element.findall(".//resource_ref")
+    ]
+
+def export(element):
+    return {
+        "ids": get_resource_id_set_list(element),
+        "options": export_attributes(element),
+    }
diff --git a/pcs/lib/cib/constraint/ticket.py b/pcs/lib/cib/constraint/ticket.py
new file mode 100644
index 0000000..6b7cdb2
--- /dev/null
+++ b/pcs/lib/cib/constraint/ticket.py
@@ -0,0 +1,117 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from functools import partial
+
+from lxml import etree
+
+from pcs.lib import reports
+from pcs.lib.cib.constraint import constraint
+from pcs.lib.cib import tools
+from pcs.lib.errors import LibraryError
+
+TAG_NAME = 'rsc_ticket'
+DESCRIPTION = "constraint id"
+ATTRIB = {
+    "loss-policy": ("fence", "stop", "freeze", "demote"),
+    "ticket": None,
+}
+ATTRIB_PLAIN = {
+    "rsc": None,
+    "rsc-role": ("Stopped", "Started", "Master", "Slave"),
+}
+
+def _validate_options_common(options):
+    report = []
+    if "loss-policy" in options:
+        loss_policy = options["loss-policy"].lower()
+        if options["loss-policy"] not in ATTRIB["loss-policy"]:
+            report.append(reports.invalid_option_value(
+                "loss-policy", options["loss-policy"], ATTRIB["loss-policy"]
+            ))
+        options["loss-policy"] = loss_policy
+    return report
+
+def _create_id(cib, ticket, resource_id, resource_role):
+    return tools.find_unique_id(
+        cib,
+        "-".join(('ticket', ticket, resource_id, resource_role))
+    )
+
+def prepare_options_with_set(cib, options, resource_set_list):
+    options = constraint.prepare_options(
+        tuple(ATTRIB.keys()),
+        options,
+        create_id=partial(
+            constraint.create_id, cib, TAG_NAME, resource_set_list
+        ),
+        validate_id=partial(tools.check_new_id_applicable, cib, DESCRIPTION),
+    )
+    report  = _validate_options_common(options)
+    if "ticket" not in options:
+        report.append(reports.required_option_is_missing('ticket'))
+    if report:
+        raise LibraryError(*report)
+    return options
+
+def prepare_options_plain(cib, options, ticket, resource_id):
+    options = options.copy()
+
+    report = _validate_options_common(options)
+
+    if not ticket:
+        report.append(reports.required_option_is_missing('ticket'))
+    options["ticket"] = ticket
+
+    if not resource_id:
+        report.append(reports.required_option_is_missing('rsc'))
+    options["rsc"] = resource_id
+
+    if "rsc-role" in options:
+        if options["rsc-role"]:
+            resource_role = options["rsc-role"].lower().capitalize()
+            if resource_role not in ATTRIB_PLAIN["rsc-role"]:
+                report.append(reports.invalid_option_value(
+                    "rsc-role", options["rsc-role"], ATTRIB_PLAIN["rsc-role"]
+                ))
+            options["rsc-role"] = resource_role
+        else:
+            del(options["rsc-role"])
+
+    if report:
+        raise LibraryError(*report)
+
+    return constraint.prepare_options(
+        tuple(list(ATTRIB) + list(ATTRIB_PLAIN)),
+        options,
+        partial(
+            _create_id,
+            cib,
+            options["ticket"],
+            resource_id,
+            options["rsc-role"] if "rsc-role" in options else "no-role"
+        ),
+        partial(tools.check_new_id_applicable, cib, DESCRIPTION)
+    )
+
+def create_plain(constraint_section, options):
+    element = etree.SubElement(constraint_section, TAG_NAME)
+    element.attrib.update(options)
+    return element
+
+def are_duplicate_plain(element, other_element):
+    return all(
+        element.attrib.get(name, "") == other_element.attrib.get(name, "")
+        for name in ("ticket", "rsc", "rsc-role")
+    )
+
+def are_duplicate_with_resource_set(element, other_element):
+    return (
+        element.attrib["ticket"] == other_element.attrib["ticket"]
+        and
+        constraint.have_duplicate_resource_sets(element, other_element)
+    )
diff --git a/pcs/lib/cib/resource.py b/pcs/lib/cib/resource.py
new file mode 100644
index 0000000..eb368fa
--- /dev/null
+++ b/pcs/lib/cib/resource.py
@@ -0,0 +1,15 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+TAGS_CLONE = "clone", "master"
+TAGS_ALL = TAGS_CLONE + ("primitive", "group")
+
+def find_by_id(tree, id):
+    element = tree.find('.//*[@id="{0}"]'.format(id))
+    if element is None or element.tag not in TAGS_ALL:
+        return None
+    return element
diff --git a/pcs/lib/cib/test/__init__.py b/pcs/lib/cib/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/cib/test/test_constraint.py b/pcs/lib/cib/test/test_constraint.py
new file mode 100644
index 0000000..961f8b0
--- /dev/null
+++ b/pcs/lib/cib/test/test_constraint.py
@@ -0,0 +1,292 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from functools import partial
+from unittest import TestCase
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.cib.constraint import constraint
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import(
+    assert_raise_library_error,
+    assert_xml_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.assertions import (
+    assert_report_item_list_equal,
+)
+
+
+def fixture_element(tag, id):
+    element = mock.MagicMock()
+    element.tag = tag
+    element.attrib = {"id": id}
+    return element
+
+ at mock.patch("pcs.lib.cib.constraint.constraint.find_parent")
+ at mock.patch("pcs.lib.cib.constraint.constraint.resource.find_by_id")
+class FindValidResourceId(TestCase):
+    def setUp(self):
+        self.cib = "cib"
+        self.report_processor = MockLibraryReportProcessor()
+        self.find = partial(
+            constraint.find_valid_resource_id,
+            self.report_processor,
+            self.cib,
+            can_repair_to_clone=False,
+            in_clone_allowed=False,
+        )
+
+    def test_raises_when_element_not_found(self, mock_find_by_id, _):
+        mock_find_by_id.return_value = None
+        assert_raise_library_error(
+            lambda: self.find(id="resourceA"),
+            (
+                severities.ERROR,
+                report_codes.RESOURCE_DOES_NOT_EXIST,
+                {"resource_id": "resourceA"}
+            ),
+        )
+
+    def test_return_same_id_when_resource_is_clone(self, mock_find_by_id, _):
+        mock_find_by_id.return_value = fixture_element("clone", "resourceA")
+        self.assertEqual("resourceA", self.find(id="resourceA"))
+
+
+    def test_return_same_id_when_is_primitive_but_not_in_clone(
+         self, mock_find_by_id, mock_find_parent
+    ):
+        mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
+        mock_find_parent.return_value = None
+
+        self.assertEqual("resourceA", self.find(id="resourceA"))
+
+    def test_refuse_when_resource_is_in_clone(
+         self, mock_find_by_id, mock_find_parent
+    ):
+        mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
+        mock_find_parent.return_value = fixture_element("clone", "clone_id")
+
+        assert_raise_library_error(
+            lambda: self.find(id="resourceA"),
+            (
+                severities.ERROR,
+                report_codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE,
+                {
+                    "resource_id": "resourceA",
+                    "parent_type": "clone",
+                    "parent_id": "clone_id",
+                },
+                report_codes.FORCE_CONSTRAINT_MULTIINSTANCE_RESOURCE
+            ),
+        )
+
+    def test_return_clone_id_when_repair_allowed(
+         self, mock_find_by_id, mock_find_parent
+    ):
+        mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
+        mock_find_parent.return_value = fixture_element("clone", "clone_id")
+
+        self.assertEqual(
+            "clone_id",
+            self.find(can_repair_to_clone=True, id="resourceA")
+        )
+        assert_report_item_list_equal(
+            self.report_processor.report_item_list, []
+        )
+
+    def test_return_resource_id_when_in_clone_allowed(
+         self, mock_find_by_id, mock_find_parent
+    ):
+        mock_find_by_id.return_value = fixture_element("primitive", "resourceA")
+        mock_find_parent.return_value = fixture_element("clone", "clone_id")
+
+        self.assertEqual(
+            "resourceA",
+            self.find(in_clone_allowed=True, id="resourceA")
+        )
+        assert_report_item_list_equal(self.report_processor.report_item_list, [(
+            severities.WARNING,
+            report_codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE,
+            {
+                "resource_id": "resourceA",
+                "parent_type": "clone",
+                "parent_id": "clone_id",
+            },
+        )])
+
+class PrepareOptionsTest(TestCase):
+    def test_refuse_unknown_option(self):
+        assert_raise_library_error(
+            lambda: constraint.prepare_options(
+                ("a", ), {"b": "c"}, mock.MagicMock(), mock.MagicMock()
+            ),
+            (
+                severities.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "b",
+                    "option_type": None,
+                    "allowed": ["a", "id"],
+                }
+            ),
+        )
+
+    def test_complete_id(self):
+        mock_create_id = mock.MagicMock()
+        mock_create_id.return_value = "new-id"
+        self.assertEqual({"id": "new-id"}, constraint.prepare_options(
+            ("a",), {}, mock_create_id, mock.MagicMock()
+        ))
+
+    def test_has_no_side_efect_on_input_options(self):
+        mock_create_id = mock.MagicMock()
+        mock_create_id.return_value = "new-id"
+        options = {"a": "b"}
+        self.assertEqual(
+            {"id": "new-id", "a": "b"},
+            constraint.prepare_options(
+                ("a",),
+                options,
+                mock_create_id, mock.MagicMock()
+            )
+        )
+        self.assertEqual({"a": "b"}, options)
+
+
+    def test_refuse_invalid_id(self):
+        class SomeException(Exception):
+            pass
+        mock_validate_id = mock.MagicMock()
+        mock_validate_id.side_effect = SomeException()
+        self.assertRaises(
+            SomeException,
+            lambda: constraint.prepare_options(
+                ("a", ), {"id": "invalid"}, mock.MagicMock(), mock_validate_id
+            ),
+        )
+        mock_validate_id.assert_called_once_with("invalid")
+
+class CreateIdTest(TestCase):
+    @mock.patch(
+        "pcs.lib.cib.constraint.constraint.resource_set.extract_id_set_list"
+    )
+    @mock.patch("pcs.lib.cib.constraint.constraint.find_unique_id")
+    def test_create_id_from_resource_set_list(self, mock_find_id, mock_extract):
+        mock_extract.return_value = [["A", "B"], ["C"]]
+        mock_find_id.return_value = "some_id"
+        self.assertEqual(
+            "some_id",
+            constraint.create_id("cib", "PREFIX", "resource_set_list")
+        )
+        mock_extract.assert_called_once_with("resource_set_list")
+        mock_find_id.assert_called_once_with("cib", "pcs_PREFIX_set_A_B_set_C")
+
+def fixture_constraint_section(return_value):
+    constraint_section = mock.MagicMock()
+    constraint_section.findall = mock.MagicMock()
+    constraint_section.findall.return_value = return_value
+    return constraint_section
+
+ at mock.patch("pcs.lib.cib.constraint.constraint.export_with_set")
+class CheckIsWithoutDuplicationTest(TestCase):
+    def test_raises_when_duplicate_element_found(self, export_with_set):
+        export_with_set.return_value = "exported_duplicate_element"
+        element = mock.MagicMock()
+        element.tag = "constraint_type"
+
+        report_processor = MockLibraryReportProcessor()
+        assert_raise_library_error(
+            lambda: constraint.check_is_without_duplication(
+                report_processor,
+                fixture_constraint_section(["duplicate_element"]), element,
+                are_duplicate=lambda e1, e2: True,
+                export_element=constraint.export_with_set,
+            ),
+            (
+                severities.ERROR,
+                report_codes.DUPLICATE_CONSTRAINTS_EXIST,
+                {
+                    'constraint_info_list': ['exported_duplicate_element'],
+                    'constraint_type': 'constraint_type'
+                },
+                report_codes.FORCE_CONSTRAINT_DUPLICATE
+            ),
+        )
+    def test_success_when_no_duplication_found(self, export_with_set):
+        export_with_set.return_value = "exported_duplicate_element"
+        element = mock.MagicMock()
+        element.tag = "constraint_type"
+        #No exception should be raised here.
+        report_processor = MockLibraryReportProcessor()
+        constraint.check_is_without_duplication(
+            report_processor, fixture_constraint_section([]), element,
+            are_duplicate=lambda e1, e2: True,
+            export_element=constraint.export_with_set,
+        )
+    def test_report_when_duplication_allowed(self, export_with_set):
+        export_with_set.return_value = "exported_duplicate_element"
+        element = mock.MagicMock()
+        element.tag = "constraint_type"
+
+        report_processor = MockLibraryReportProcessor()
+        constraint.check_is_without_duplication(
+            report_processor,
+            fixture_constraint_section(["duplicate_element"]), element,
+            are_duplicate=lambda e1, e2: True,
+            export_element=constraint.export_with_set,
+            duplication_alowed=True,
+        )
+        assert_report_item_list_equal(
+            report_processor.report_item_list,
+            [
+                (
+                    severities.WARNING,
+                    report_codes.DUPLICATE_CONSTRAINTS_EXIST,
+                    {
+                        'constraint_info_list': ['exported_duplicate_element'],
+                        'constraint_type': 'constraint_type'
+                    },
+                )
+            ]
+        )
+
+
+class CreateWithSetTest(TestCase):
+    def test_put_new_constraint_to_constraint_section(self):
+        constraint_section = etree.Element("constraints")
+        constraint.create_with_set(
+            constraint_section,
+            "ticket",
+            {"a": "b"},
+            [{"ids": ["A", "B"], "options": {"c": "d"}}]
+        )
+        assert_xml_equal(etree.tostring(constraint_section).decode(), """
+            <constraints>
+                <ticket a="b">
+                    <resource_set c="d" id="pcs_rsc_set_A_B">
+                        <resource_ref id="A"/>
+                        <resource_ref id="B"/>
+                    </resource_set>
+                </ticket>
+            </constraints>
+        """)
+
+    def test_refuse_empty_resource_set_list(self):
+        constraint_section = etree.Element("constraints")
+        assert_raise_library_error(
+            lambda: constraint.create_with_set(
+                constraint_section,
+                "ticket",
+                {"a": "b"},
+                []
+            ),
+            (severities.ERROR, report_codes.EMPTY_RESOURCE_SET_LIST, {})
+        )
diff --git a/pcs/lib/cib/test/test_constraint_colocation.py b/pcs/lib/cib/test/test_constraint_colocation.py
new file mode 100644
index 0000000..377b981
--- /dev/null
+++ b/pcs/lib/cib/test/test_constraint_colocation.py
@@ -0,0 +1,99 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.common import report_codes
+from pcs.lib.cib.constraint import colocation
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.pcs_mock import mock
+
+
+#Patching check_new_id_applicable is always desired when working with
+#prepare_options_with_set. The patched function raises when the id is not
+#applicable and does nothing when it is - these tests take no action with it.
+ at mock.patch("pcs.lib.cib.constraint.colocation.check_new_id_applicable")
+class PrepareOptionsWithSetTest(TestCase):
+    def setUp(self):
+        self.cib = "cib"
+        self.resource_set_list = "resource_set_list"
+        self.prepare = lambda options: colocation.prepare_options_with_set(
+            self.cib,
+            options,
+            self.resource_set_list,
+        )
+
+    @mock.patch("pcs.lib.cib.constraint.colocation.constraint.create_id")
+    def test_complete_id(self, mock_create_id, _):
+        mock_create_id.return_value = "generated_id"
+        options = {"score": "1"}
+        expected_options = options.copy()
+        expected_options.update({"id": "generated_id"})
+        self.assertEqual(expected_options, self.prepare(options))
+        mock_create_id.assert_called_once_with(
+            self.cib,
+            colocation.TAG_NAME,
+            self.resource_set_list
+        )
+
+    def test_refuse_invalid_id(self, mock_check_new_id_applicable):
+        mock_check_new_id_applicable.side_effect = Exception()
+        invalid_id = "invalid_id"
+        self.assertRaises(Exception, lambda: self.prepare({
+            "score": "1",
+            "id": invalid_id,
+        }))
+        mock_check_new_id_applicable.assert_called_once_with(
+            self.cib,
+            colocation.DESCRIPTION,
+            invalid_id
+        )
+
+    def test_refuse_bad_score(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare({
+                "score": "bad",
+                "id": "id",
+            }),
+            (severities.ERROR, report_codes.INVALID_SCORE, {
+                'score': 'bad'
+            }),
+        )
+
+    def test_refuse_more_scores(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare({
+                "score": "1",
+                "score-attribute": "2",
+                "id": "id",
+            }),
+            (severities.ERROR, report_codes.MULTIPLE_SCORE_OPTIONS, {}),
+        )
+
+    def test_refuse_unknown_attributes(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare({
+                "score": "1",
+                "unknown": "value",
+                "id": "id",
+            }),
+            (
+                severities.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "unknown",
+                    "option_type": None,
+                    "allowed": [
+                        "id",
+                        "score",
+                        "score-attribute",
+                        "score-attribute-mangle",
+                    ],
+                }
+            ),
+        )
diff --git a/pcs/lib/cib/test/test_constraint_order.py b/pcs/lib/cib/test/test_constraint_order.py
new file mode 100644
index 0000000..02d1c5f
--- /dev/null
+++ b/pcs/lib/cib/test/test_constraint_order.py
@@ -0,0 +1,103 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.common import report_codes
+from pcs.lib.cib.constraint import order
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.pcs_mock import mock
+
+
+#Patching check_new_id_applicable is always desired when working with
+#prepare_options_with_set. The patched function raises when the id is not
+#applicable and does nothing when it is - these tests take no action with it.
+ at mock.patch("pcs.lib.cib.constraint.order.check_new_id_applicable")
+class PrepareOptionsWithSetTest(TestCase):
+    def setUp(self):
+        self.cib = "cib"
+        self.resource_set_list = "resource_set_list"
+        self.prepare = lambda options: order.prepare_options_with_set(
+            self.cib,
+            options,
+            self.resource_set_list,
+        )
+
+    @mock.patch("pcs.lib.cib.constraint.order.constraint.create_id")
+    def test_complete_id(self, mock_create_id, _):
+        mock_create_id.return_value = "generated_id"
+        options = {"symmetrical": "true", "kind": "Optional"}
+        expected_options = options.copy()
+        expected_options.update({"id": "generated_id"})
+        self.assertEqual(expected_options, self.prepare(options))
+        mock_create_id.assert_called_once_with(
+            self.cib,
+            order.TAG_NAME,
+            self.resource_set_list
+        )
+
+    def test_refuse_invalid_id(self, mock_check_new_id_applicable):
+        mock_check_new_id_applicable.side_effect = Exception()
+        invalid_id = "invalid_id"
+        self.assertRaises(Exception, lambda: self.prepare({
+            "symmetrical": "true",
+            "kind": "Optional",
+            "id": invalid_id,
+        }))
+        mock_check_new_id_applicable.assert_called_once_with(
+            self.cib,
+            order.DESCRIPTION,
+            invalid_id
+        )
+
+    def test_refuse_unknown_kind(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare({
+                "symmetrical": "true",
+                "kind": "unknown",
+                "id": "id",
+            }),
+            (severities.ERROR, report_codes.INVALID_OPTION_VALUE, {
+                'allowed_values': ('Optional', 'Mandatory', 'Serialize'),
+                'option_value': 'unknown',
+                'option_name': 'kind',
+            }),
+        )
+
+    def test_refuse_unknown_symmetrical(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare({
+                "symmetrical": "unknown",
+                "kind": "Optional",
+                "id": "id",
+            }),
+            (severities.ERROR, report_codes.INVALID_OPTION_VALUE, {
+                'allowed_values': ('true', 'false'),
+                'option_value': 'unknown',
+                'option_name': 'symmetrical',
+            }),
+        )
+
+    def test_refuse_unknown_attributes(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare({
+                "symmetrical": "unknown",
+                "kind": "Optional",
+                "unknown": "value",
+                "id": "id",
+            }),
+            (
+                severities.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "unknown",
+                    "option_type": None,
+                    "allowed": [ "id", "kind", "symmetrical"],
+                }
+            ),
+        )
diff --git a/pcs/lib/cib/test/test_constraint_ticket.py b/pcs/lib/cib/test/test_constraint_ticket.py
new file mode 100644
index 0000000..4f21500
--- /dev/null
+++ b/pcs/lib/cib/test/test_constraint_ticket.py
@@ -0,0 +1,295 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from functools import partial
+from unittest import TestCase
+
+from pcs.common import report_codes
+from pcs.lib.cib.constraint import ticket
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.pcs_mock import mock
+
+
+ at mock.patch("pcs.lib.cib.constraint.ticket.tools.check_new_id_applicable")
+class PrepareOptionsPlainTest(TestCase):
+    def setUp(self):
+        self.cib = "cib"
+        self.prepare = partial(ticket.prepare_options_plain, self.cib)
+
+    @mock.patch("pcs.lib.cib.constraint.ticket._create_id")
+    def test_prepare_correct_options(self, mock_create_id, _):
+        mock_create_id.return_value = "generated_id"
+        self.assertEqual(
+            {
+                'id': 'generated_id',
+                'loss-policy': 'fence',
+                'rsc': 'resourceA',
+                'rsc-role': 'Master',
+                'ticket': 'ticket_key'
+            },
+            self.prepare(
+                {"loss-policy": "fence", "rsc-role": "master"},
+                "ticket_key",
+                "resourceA",
+            )
+        )
+
+    @mock.patch("pcs.lib.cib.constraint.ticket._create_id")
+    def test_does_not_include_role_if_not_presented(self, mock_create_id, _):
+        mock_create_id.return_value = "generated_id"
+        self.assertEqual(
+            {
+                'id': 'generated_id',
+                'loss-policy': 'fence',
+                'rsc': 'resourceA',
+                'ticket': 'ticket_key'
+            },
+            self.prepare(
+                {"loss-policy": "fence", "rsc-role": ""},
+                "ticket_key",
+                "resourceA",
+            )
+        )
+
+    def test_refuse_unknown_attributes(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare(
+                {"unknown": "nonsense", "rsc-role": "master"},
+                "ticket_key",
+                "resourceA",
+            ),
+            (
+                severities.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "unknown",
+                    "option_type": None,
+                    "allowed": ["id", "loss-policy", "rsc", "rsc-role", "ticket"],
+                }
+            ),
+        )
+
+    def test_refuse_bad_role(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare(
+                {"id": "id", "rsc-role": "bad_role"}, "ticket_key", "resourceA"
+            ),
+            (severities.ERROR, report_codes.INVALID_OPTION_VALUE, {
+                'allowed_values': ('Stopped', 'Started', 'Master', 'Slave'),
+                'option_value': 'bad_role',
+                'option_name': 'rsc-role',
+            }),
+        )
+
+    def test_refuse_missing_ticket(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare(
+                {"id": "id", "rsc-role": "master"}, "", "resourceA"
+            ),
+            (
+                severities.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {
+                    "option_name": "ticket"
+                }
+            ),
+        )
+
+    def test_refuse_missing_resource_id(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare(
+                {"id": "id", "rsc-role": "master"}, "ticket_key", ""
+            ),
+            (
+                severities.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {
+                    "option_name": "rsc",
+                }
+            ),
+        )
+
+
+    def test_refuse_unknown_lost_policy(self, mock_check_new_id_applicable):
+        assert_raise_library_error(
+            lambda: self.prepare(
+                { "loss-policy": "unknown", "ticket": "T", "id": "id"},
+                "ticket_key",
+                "resourceA",
+            ),
+            (severities.ERROR, report_codes.INVALID_OPTION_VALUE, {
+                'allowed_values': ('fence', 'stop', 'freeze', 'demote'),
+                'option_value': 'unknown',
+                'option_name': 'loss-policy',
+            }),
+        )
+
+    @mock.patch("pcs.lib.cib.constraint.ticket._create_id")
+    def test_complete_id(self, mock_create_id, _):
+        mock_create_id.return_value = "generated_id"
+        options = {"loss-policy": "freeze", "ticket": "T", "rsc-role": "Master"}
+        ticket_key = "ticket_key"
+        resource_id = "resourceA"
+        expected_options = options.copy()
+        expected_options.update({
+            "id": "generated_id",
+            "rsc": resource_id,
+            "rsc-role": "Master",
+            "ticket": ticket_key,
+        })
+        self.assertEqual(expected_options, self.prepare(
+            options,
+            ticket_key,
+            resource_id,
+        ))
+        mock_create_id.assert_called_once_with(
+            self.cib,
+            ticket_key,
+            resource_id,
+            "Master",
+        )
+
+
+#Patch check_new_id_applicable is always desired when working with
+#prepare_options_with_set. Patched function raises when id not applicable
+#and do nothing when applicable - in this case tests do no actions with it
+ at mock.patch("pcs.lib.cib.constraint.ticket.tools.check_new_id_applicable")
+class PrepareOptionsWithSetTest(TestCase):
+    def setUp(self):
+        self.cib = "cib"
+        self.resource_set_list = "resource_set_list"
+        self.prepare = lambda options: ticket.prepare_options_with_set(
+            self.cib,
+            options,
+            self.resource_set_list,
+        )
+
+    @mock.patch("pcs.lib.cib.constraint.ticket.constraint.create_id")
+    def test_complete_id(self, mock_create_id, _):
+        mock_create_id.return_value = "generated_id"
+        options = {"loss-policy": "freeze", "ticket": "T"}
+        expected_options = options.copy()
+        expected_options.update({"id": "generated_id"})
+        self.assertEqual(expected_options, self.prepare(options))
+        mock_create_id.assert_called_once_with(
+            self.cib,
+            ticket.TAG_NAME,
+            self.resource_set_list
+        )
+
+    def test_refuse_invalid_id(self, mock_check_new_id_applicable):
+        class SomeException(Exception):
+            pass
+        mock_check_new_id_applicable.side_effect = SomeException()
+        invalid_id = "invalid_id"
+        self.assertRaises(SomeException, lambda: self.prepare({
+            "loss-policy": "freeze",
+            "ticket": "T",
+            "id": invalid_id,
+        }))
+        mock_check_new_id_applicable.assert_called_once_with(
+            self.cib,
+            ticket.DESCRIPTION,
+            invalid_id
+        )
+
+    def test_refuse_unknown_lost_policy(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare({
+                "loss-policy": "unknown",
+                "ticket": "T",
+                "id": "id",
+            }),
+            (severities.ERROR, report_codes.INVALID_OPTION_VALUE, {
+                'allowed_values': ('fence', 'stop', 'freeze', 'demote'),
+                'option_value': 'unknown',
+                'option_name': 'loss-policy',
+            }),
+        )
+
+    def test_refuse_missing_ticket(self, _):
+        assert_raise_library_error(
+            lambda: self.prepare({"loss-policy": "stop", "id": "id"}),
+            (
+                severities.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "ticket"}
+            )
+        )
+
+
+class Element(object):
+    """Minimal stand-in for an xml element: exposes only the attrib dict."""
+
+    def __init__(self, attrib):
+        self.attrib = attrib
+
+    def update(self, attrib):
+        # returns self so tests can mutate an element and pass it to the
+        # tested function in a single expression
+        self.attrib.update(attrib)
+        return self
+
+
+class AreDuplicatePlain(TestCase):
+    """Tests for ticket.are_duplicate_plain.
+
+    Two plain constraints count as duplicates only when "ticket", "rsc" and
+    "rsc-role" all match. The "resurceA" spelling below is irrelevant - the
+    two elements only need equal attribute values.
+    """
+    def setUp(self):
+        self.first = Element({
+            "ticket": "ticket_key",
+            "rsc": "resurceA",
+            "rsc-role": "Master"
+        })
+        self.second = Element({
+            "ticket": "ticket_key",
+            "rsc": "resurceA",
+            "rsc-role": "Master"
+        })
+
+    def test_returns_true_for_duplicate_elements(self):
+        self.assertTrue(ticket.are_duplicate_plain(self.first, self.second))
+
+    def test_returns_false_for_different_ticket(self):
+        self.assertFalse(ticket.are_duplicate_plain(
+            self.first,
+            self.second.update({"ticket": "X"})
+        ))
+
+    def test_returns_false_for_different_resource(self):
+        self.assertFalse(ticket.are_duplicate_plain(
+            self.first,
+            self.second.update({"rsc": "Y"})
+        ))
+
+    def test_returns_false_for_different_role(self):
+        self.assertFalse(ticket.are_duplicate_plain(
+            self.first,
+            self.second.update({"rsc-role": "Z"})
+        ))
+
+    def test_returns_false_for_different_elements(self):
+        self.second.update({
+            "ticket": "X",
+            "rsc": "Y",
+            "rsc-role": "Z"
+        })
+        self.assertFalse(ticket.are_duplicate_plain(self.first, self.second))
+
+ at mock.patch("pcs.lib.cib.constraint.ticket.constraint.have_duplicate_resource_sets")
+class AreDuplicateWithResourceSet(TestCase):
+    def test_returns_true_for_duplicate_elements(
+        self, mock_have_duplicate_resource_sets
+    ):
+        mock_have_duplicate_resource_sets.return_value = True
+        self.assertTrue(ticket.are_duplicate_with_resource_set(
+            Element({"ticket": "ticket_key"}),
+            Element({"ticket": "ticket_key"}),
+        ))
+
+    def test_returns_false_for_different_elements(
+        self, mock_have_duplicate_resource_sets
+    ):
+        mock_have_duplicate_resource_sets.return_value = True
+        self.assertFalse(ticket.are_duplicate_with_resource_set(
+            Element({"ticket": "ticket_key"}),
+            Element({"ticket": "X"}),
+        ))
diff --git a/pcs/lib/cib/test/test_resource_set.py b/pcs/lib/cib/test/test_resource_set.py
new file mode 100644
index 0000000..7b77ac4
--- /dev/null
+++ b/pcs/lib/cib/test/test_resource_set.py
@@ -0,0 +1,110 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.cib.constraint import resource_set
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import(
+    assert_raise_library_error,
+    assert_xml_equal
+)
+from pcs.test.tools.pcs_mock import mock
+
+
+class PrepareSetTest(TestCase):
+    """Tests for resource_set.prepare_set (id correction + option checks)."""
+    def test_return_corrected_resurce_set(self):
+        # every id in "ids" must be run through the find_valid_id callback
+        # and replaced by its result
+        find_valid_id = mock.Mock()
+        find_valid_id.side_effect = lambda id: {"A": "AA", "B": "BB"}[id]
+        self.assertEqual(
+            {"ids": ["AA", "BB"], "options": {"sequential": "true"}},
+            resource_set.prepare_set(find_valid_id, {
+                "ids": ["A", "B"],
+                "options": {"sequential": "true"}
+            })
+        )
+
+    def test_refuse_invalid_attribute_name(self):
+        assert_raise_library_error(
+            lambda: resource_set.prepare_set(mock.Mock(), {
+                "ids": ["A", "B"],
+                "options": {"invalid_name": "true"}
+            }),
+            (
+                severities.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "invalid_name",
+                    "option_type": None,
+                    "allowed": ["action", "require-all", "role", "sequential"],
+            }),
+        )
+
+    def test_refuse_invalid_attribute_value(self):
+        assert_raise_library_error(
+            lambda: resource_set.prepare_set(mock.Mock(), {
+                "ids": ["A", "B"],
+                "options": {"role": "invalid"}
+            }),
+            (severities.ERROR, report_codes.INVALID_OPTION_VALUE, {
+                'option_name': 'role',
+                'allowed_values': ('Stopped', 'Started', 'Master', 'Slave'),
+                'option_value': 'invalid',
+            }),
+        )
+
+class ExtractIdListTest(TestCase):
+    """Tests for resource_set.extract_id_set_list."""
+    def test_return_id_list_from_resource_set_list(self):
+        # only the "ids" value of each set is kept; "options" are dropped
+        self.assertEqual(
+            [["A", "B"], ["C", "D"]],
+            resource_set.extract_id_set_list([
+                {"ids": ["A", "B"], "options": {}},
+                {"ids": ["C", "D"], "options": {}},
+            ])
+        )
+
+class CreateTest(TestCase):
+    """Tests for resource_set.create (building the resource_set element)."""
+    def test_resource_set_to_parent(self):
+        # the created resource_set gets an id derived from its member ids
+        # and one resource_ref child per member
+        constraint_element = etree.Element("constraint")
+        resource_set.create(
+            constraint_element,
+            {"ids": ["A", "B"], "options": {"sequential": "true"}},
+        )
+        assert_xml_equal(etree.tostring(constraint_element).decode(), """
+            <constraint>
+              <resource_set id="pcs_rsc_set_A_B" sequential="true">
+                <resource_ref id="A"></resource_ref>
+                <resource_ref id="B"></resource_ref>
+              </resource_set>
+            </constraint>
+        """)
+
+class GetResourceIdListTest(TestCase):
+    """Tests for resource_set.get_resource_id_set_list."""
+    def test_returns_id_list_from_element(self):
+        element = etree.Element("resource_set")
+        for id in ("A", "B"):
+            etree.SubElement(element, "resource_ref").attrib["id"] = id
+
+        self.assertEqual(
+            ["A", "B"],
+            resource_set.get_resource_id_set_list(element)
+        )
+
+class ExportTest(TestCase):
+    """Tests for resource_set.export (element -> dict representation)."""
+    def test_returns_element_in_dict_representation(self):
+        element = etree.Element("resource_set")
+        element.attrib.update({"role": "Master"})
+        for id in ("A", "B"):
+            etree.SubElement(element, "resource_ref").attrib["id"] = id
+
+        self.assertEqual(
+            {'options': {'role': 'Master'}, 'ids': ['A', 'B']},
+            resource_set.export(element)
+        )
diff --git a/pcs/lib/cib/tools.py b/pcs/lib/cib/tools.py
new file mode 100644
index 0000000..dfe31fc
--- /dev/null
+++ b/pcs/lib/cib/tools.py
@@ -0,0 +1,89 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker_values import validate_id
+
+def does_id_exist(tree, check_id):
+    """
+    Checks to see if id exists in the xml dom passed
+    tree cib etree node
+    check_id id to check
+    """
+    # NOTE(review): check_id is interpolated into the xpath verbatim; an id
+    # containing a double quote would break the query - presumably callers
+    # pass only already validated ids (TODO confirm)
+    return tree.find('.//*[@id="{0}"]'.format(check_id)) is not None
+
+def validate_id_does_not_exist(tree, id):
+    """
+    Raise LibraryError if the id is already used in the tree
+    tree cib etree node
+    id the id to check
+    """
+    if does_id_exist(tree, id):
+        raise LibraryError(reports.id_already_exists(id))
+
+def find_unique_id(tree, check_id):
+    """
+    Returns check_id if it doesn't exist in the dom, otherwise it adds
+    an integer to the end of the id and increments it until a unique id is found
+    tree cib etree node
+    check_id id to check
+    """
+    counter = 1
+    temp_id = check_id
+    # try "<check_id>", "<check_id>-1", "<check_id>-2", ... until free
+    while does_id_exist(tree, temp_id):
+        temp_id = "{0}-{1}".format(check_id, counter)
+        counter += 1
+    return temp_id
+
+def check_new_id_applicable(tree, description, id):
+    """
+    Raise LibraryError if id is not a valid xml id or is already used in tree
+    tree cib etree node
+    description human readable description used in error messages
+    id the id to validate
+    """
+    validate_id(id, description)
+    validate_id_does_not_exist(tree, id)
+
+def _get_mandatory_section(tree, section_name):
+    """
+    Return required element from tree, raise LibraryError if missing
+    tree cib etree node
+    """
+    section = tree.find(".//{0}".format(section_name))
+    if section is not None:
+        return section
+    raise LibraryError(reports.cib_missing_mandatory_section(section_name))
+
+def get_configuration(tree):
+    """
+    Return the 'configuration' element from tree,
+    raise LibraryError if missing
+    tree cib etree node
+    """
+    return _get_mandatory_section(tree, "configuration")
+
+def get_acls(tree):
+    """
+    Return 'acls' element from tree, create a new one if missing
+    tree cib etree node
+    """
+    acls = tree.find(".//acls")
+    if acls is None:
+        # unlike 'constraints', the section is created on demand under the
+        # mandatory 'configuration' element
+        acls = etree.SubElement(get_configuration(tree), "acls")
+    return acls
+
+def get_constraints(tree):
+    """
+    Return the 'constraints' element from tree,
+    raise LibraryError if missing
+    tree cib etree node
+    """
+    return _get_mandatory_section(tree, "configuration/constraints")
+
+def find_parent(element, tag_names):
+    """
+    Return the closest ancestor of element (element itself included) whose
+    tag is in tag_names; return None when no such ancestor exists
+    element etree node
+    tag_names iterable of tag names to stop at
+    """
+    candidate = element
+    while True:
+        if candidate is None or candidate.tag in tag_names:
+            return candidate
+        candidate = candidate.getparent()
+
+def export_attributes(element):
+    return  dict((key, value) for key, value in element.attrib.items())
diff --git a/pcs/lib/commands/__init__.py b/pcs/lib/commands/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/commands/constraint/__init__.py b/pcs/lib/commands/constraint/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/commands/constraint/colocation.py b/pcs/lib/commands/constraint/colocation.py
new file mode 100644
index 0000000..e384867
--- /dev/null
+++ b/pcs/lib/commands/constraint/colocation.py
@@ -0,0 +1,25 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from functools import partial
+
+from pcs.lib.cib.constraint import colocation
+import pcs.lib.commands.constraint.common
+
+#configure common constraint command
+show = partial(
+    pcs.lib.commands.constraint.common.show,
+    colocation.TAG_NAME,
+    lambda element: element.attrib.has_key('rsc')
+)
+
+#configure common constraint command: create a colocation constraint
+#containing resource sets
+create_with_set = partial(
+    pcs.lib.commands.constraint.common.create_with_set,
+    colocation.TAG_NAME,
+    colocation.prepare_options_with_set
+)
diff --git a/pcs/lib/commands/constraint/common.py b/pcs/lib/commands/constraint/common.py
new file mode 100644
index 0000000..aef1403
--- /dev/null
+++ b/pcs/lib/commands/constraint/common.py
@@ -0,0 +1,88 @@
+"""
+Common functions used from specific constraint commands.
+Functions of this module are not intended to be called directly by a
+client.
+"""
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from functools import partial
+
+from pcs.lib.cib.constraint import constraint, resource_set
+from pcs.lib.cib.tools import get_constraints
+
+
+def create_with_set(
+    tag_name, prepare_options, env, resource_set_list, constraint_options,
+    can_repair_to_clone=False,
+    resource_in_clone_alowed=False,
+    duplication_alowed=False,
+    duplicate_check=None,
+):
+    """
+    Create a constraint (of any type) containing resource sets and push it
+    to the cib.
+
+    string tag_name is constraint tag name
+    callable prepare_options takes
+        cib(Element), options(dict), resource_set_list and return corrected
+        options or if options not usable raises error
+    env is library environment
+    list resource_set_list is description of resource set, for example:
+        {"ids": ["A", "B"], "options": {"sequential": "true"}},
+    dict constraint_options is base for building attributes of constraint tag
+    bool can_repair_to_clone is passed to constraint.find_valid_resource_id
+        (presumably allows repairing a resource id to its clone/master id -
+        TODO confirm against find_valid_resource_id)
+    bool resource_in_clone_alowed flag for allowing to reference id which is
+        in tag clone or master
+    bool duplication_alowed flag for allowing create duplicate element
+    callable duplicate_check takes two elements and decide if they are
+        duplicates; defaults to constraint.have_duplicate_resource_sets
+    (note: the "alowed" spelling is part of the public keyword interface)
+    """
+    cib = env.get_cib()
+
+    find_valid_resource_id = partial(
+        constraint.find_valid_resource_id,
+        env.report_processor, cib, can_repair_to_clone, resource_in_clone_alowed
+    )
+
+    constraint_section = get_constraints(cib)
+    constraint_element = constraint.create_with_set(
+        constraint_section,
+        tag_name,
+        options=prepare_options(cib, constraint_options, resource_set_list),
+        resource_set_list=[
+             resource_set.prepare_set(find_valid_resource_id, resource_set_item)
+             for resource_set_item in resource_set_list
+        ]
+    )
+
+    if not duplicate_check:
+        duplicate_check = constraint.have_duplicate_resource_sets
+
+    # raises unless the new element is unique or duplication is allowed
+    constraint.check_is_without_duplication(
+        env.report_processor,
+        constraint_section,
+        constraint_element,
+        are_duplicate=duplicate_check,
+        export_element=constraint.export_with_set,
+        duplication_alowed=duplication_alowed,
+    )
+
+    env.push_cib(cib)
+
+def show(tag_name, is_plain, env):
+    """
+    Return dict with exported constraints of given tag_name, split into
+    "plain" constraints and constraints "with_resource_sets"
+    string tag_name is constraint tag name
+    callable is_plain takes constraint element and returns if is plain (i.e.
+        without resource set)
+    env is library environment
+    """
+    constraints_info = {"plain": [], "with_resource_sets": []}
+    for element in get_constraints(env.get_cib()).findall(".//"+tag_name):
+        if is_plain(element):
+            constraints_info["plain"].append(constraint.export_plain(element))
+        else:
+            constraints_info["with_resource_sets"].append(
+                constraint.export_with_set(element)
+            )
+    return constraints_info
diff --git a/pcs/lib/commands/constraint/order.py b/pcs/lib/commands/constraint/order.py
new file mode 100644
index 0000000..d2b2b33
--- /dev/null
+++ b/pcs/lib/commands/constraint/order.py
@@ -0,0 +1,24 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from functools import partial
+from pcs.lib.cib.constraint import order
+import pcs.lib.commands.constraint.common
+
+#configure common constraint command
+show = partial(
+    pcs.lib.commands.constraint.common.show,
+    order.TAG_NAME,
+    lambda element: element.attrib.has_key('first')
+)
+
+#configure common constraint command: create an order constraint containing
+#resource sets
+create_with_set = partial(
+    pcs.lib.commands.constraint.common.create_with_set,
+    order.TAG_NAME,
+    order.prepare_options_with_set
+)
diff --git a/pcs/lib/commands/constraint/ticket.py b/pcs/lib/commands/constraint/ticket.py
new file mode 100644
index 0000000..e6960d5
--- /dev/null
+++ b/pcs/lib/commands/constraint/ticket.py
@@ -0,0 +1,70 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from functools import partial
+
+from pcs.lib.cib.constraint import constraint, ticket
+from pcs.lib.cib.tools import get_constraints
+import pcs.lib.commands.constraint.common
+
+
+#configure common constraint command
+show = partial(
+    pcs.lib.commands.constraint.common.show,
+    ticket.TAG_NAME,
+    lambda element: element.attrib.has_key('rsc')
+)
+
+#configure common constraint command: create a ticket constraint containing
+#resource sets (with a ticket specific duplicate check)
+create_with_set = partial(
+    pcs.lib.commands.constraint.common.create_with_set,
+    ticket.TAG_NAME,
+    ticket.prepare_options_with_set,
+    duplicate_check=ticket.are_duplicate_with_resource_set,
+)
+
+def create(
+    env, ticket_key, resource_id, options,
+    autocorrection_allowed=False,
+    resource_in_clone_alowed=False,
+    duplication_alowed=False,
+):
+    """
+    create plain ticket constraint
+    env is library environment
+    string ticket_key ticket for constraining resource
+    string resource_id id of the constrained resource
+    dict options desired constraint attributes
+    bool autocorrection_allowed is passed to
+        constraint.find_valid_resource_id (presumably allows repairing the
+        resource id - TODO confirm against find_valid_resource_id)
+    bool resource_in_clone_alowed flag for allowing to reference id which is
+        in tag clone or master
+    bool duplication_alowed flag for allowing create duplicate element
+    """
+    cib = env.get_cib()
+
+    options = ticket.prepare_options_plain(
+        cib,
+        options,
+        ticket_key,
+        constraint.find_valid_resource_id(
+            env.report_processor, cib,
+            autocorrection_allowed, resource_in_clone_alowed, resource_id
+        ),
+    )
+
+    constraint_section = get_constraints(cib)
+    constraint_element = ticket.create_plain(constraint_section, options)
+
+    # raises unless the new element is unique or duplication is allowed
+    constraint.check_is_without_duplication(
+        env.report_processor,
+        constraint_section,
+        constraint_element,
+        are_duplicate=ticket.are_duplicate_plain,
+        export_element=constraint.export_plain,
+        duplication_alowed=duplication_alowed,
+    )
+
+    env.push_cib(cib)
diff --git a/pcs/lib/commands/quorum.py b/pcs/lib/commands/quorum.py
new file mode 100644
index 0000000..1ee5411
--- /dev/null
+++ b/pcs/lib/commands/quorum.py
@@ -0,0 +1,107 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+
+
+def get_config(lib_env):
+    """
+    Extract and return quorum configuration from corosync.conf
+    lib_env LibraryEnvironment
+    Raise LibraryError when run against a live cman cluster
+    """
+    __ensure_not_cman(lib_env)
+    cfg = lib_env.get_corosync_conf()
+    # "device" stays None when no quorum device is configured
+    device = None
+    if cfg.has_quorum_device():
+        model, model_options, generic_options = cfg.get_quorum_device_settings()
+        device = {
+            "model": model,
+            "model_options": model_options,
+            "generic_options": generic_options,
+        }
+    return {
+        "options": cfg.get_quorum_options(),
+        "device": device,
+    }
+
+def set_options(lib_env, options, skip_offline_nodes=False):
+    """
+    Set corosync quorum options, distribute and reload corosync.conf if live
+    lib_env LibraryEnvironment
+    options quorum options (dict)
+    skip_offline_nodes continue even if not all nodes are accessible
+    Raise LibraryError when run against a live cman cluster
+    """
+    __ensure_not_cman(lib_env)
+    cfg = lib_env.get_corosync_conf()
+    cfg.set_quorum_options(lib_env.report_processor, options)
+    lib_env.push_corosync_conf(cfg, skip_offline_nodes)
+
+def add_device(
+    lib_env, model, model_options, generic_options, force_model=False,
+    force_options=False, skip_offline_nodes=False
+):
+    """
+    Add quorum device to cluster, distribute and reload configs if live
+    lib_env LibraryEnvironment
+    model quorum device model
+    model_options model specific options dict
+    generic_options generic quorum device options dict
+    force_model continue even if the model is not valid
+    force_options continue even if options are not valid
+    skip_offline_nodes continue even if not all nodes are accessible
+    Raise LibraryError when run against a live cman cluster
+    """
+    __ensure_not_cman(lib_env)
+
+    cfg = lib_env.get_corosync_conf()
+    cfg.add_quorum_device(
+        lib_env.report_processor,
+        model,
+        model_options,
+        generic_options,
+        force_model,
+        force_options
+    )
+    # TODO validation, verification, certificates, etc.
+    lib_env.push_corosync_conf(cfg, skip_offline_nodes)
+
+def update_device(
+    lib_env, model_options, generic_options, force_options=False,
+    skip_offline_nodes=False
+):
+    """
+    Change quorum device settings, distribute and reload configs if live
+    lib_env LibraryEnvironment
+    model_options model specific options dict
+    generic_options generic quorum device options dict
+    force_options continue even if options are not valid
+    skip_offline_nodes continue even if not all nodes are accessible
+    Raise LibraryError when run against a live cman cluster
+    """
+    __ensure_not_cman(lib_env)
+    cfg = lib_env.get_corosync_conf()
+    cfg.update_quorum_device(
+        lib_env.report_processor,
+        model_options,
+        generic_options,
+        force_options
+    )
+    lib_env.push_corosync_conf(cfg, skip_offline_nodes)
+
+def remove_device(lib_env, skip_offline_nodes=False):
+    """
+    Stop using quorum device, distribute and reload configs if live
+    lib_env LibraryEnvironment
+    skip_offline_nodes continue even if not all nodes are accessible
+    Raise LibraryError when run against a live cman cluster
+    """
+    __ensure_not_cman(lib_env)
+
+    cfg = lib_env.get_corosync_conf()
+    cfg.remove_quorum_device()
+    lib_env.push_corosync_conf(cfg, skip_offline_nodes)
+
+def __ensure_not_cman(lib_env):
+    """
+    Raise LibraryError when modifying a live cluster that runs cman
+    (quorum commands are not supported there)
+    """
+    if lib_env.is_corosync_conf_live and lib_env.is_cman_cluster:
+        raise LibraryError(reports.cman_unsupported_command())
+
diff --git a/pcs/lib/commands/test/__init__.py b/pcs/lib/commands/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/commands/test/test_constraint_common.py b/pcs/lib/commands/test/test_constraint_common.py
new file mode 100644
index 0000000..e0872ff
--- /dev/null
+++ b/pcs/lib/commands/test/test_constraint_common.py
@@ -0,0 +1,193 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from lxml import etree
+
+from pcs.common import report_codes
+from pcs.lib.commands.constraint import common as constraint
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import(
+    assert_raise_library_error,
+    assert_xml_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_mock import mock
+
+
+def fixture_cib_and_constraints():
+    """
+    Return a (cib, constraint_section) etree pair with four primitives
+    ("A", "B", "E", "F") and an empty configuration/constraints section.
+    """
+    cib = etree.Element("cib")
+    resources_section = etree.SubElement(cib, "resources")
+    for id in ("A", "B", "E", "F"):
+        etree.SubElement(resources_section, "primitive").attrib["id"] = id
+    constraint_section = etree.SubElement(
+        etree.SubElement(cib, "configuration"),
+        "constraints"
+    )
+    return cib, constraint_section
+
+def fixture_env(cib):
+    """
+    Return a mocked library environment whose get_cib returns the given cib
+    and whose push_cib and report_processor can be inspected by tests.
+    """
+    env = mock.MagicMock()
+    env.get_cib = mock.Mock()
+    env.get_cib.return_value = cib
+    env.push_cib = mock.Mock()
+    env.report_processor = MockLibraryReportProcessor()
+    return env
+
+class CreateWithSetTest(TestCase):
+    """Tests for constraint.create_with_set (the common command)."""
+    def setUp(self):
+        self.cib, self.constraint_section = fixture_cib_and_constraints()
+        self.env = fixture_env(self.cib)
+        # an independent copy of the initial cib used to build expected xml
+        self.independent_cib = etree.XML(etree.tostring(self.cib))
+
+    def create(self, duplication_alowed=False):
+        # run the tested command with a fixed two-set constraint
+        constraint.create_with_set(
+            "rsc_some",
+            lambda cib, options, resource_set_list: options,
+            self.env,
+            [
+                {"ids": ["A", "B"], "options": {"role": "Master"}},
+                {"ids": ["E", "F"], "options": {"action": "start"}},
+            ],
+            {"id":"some_id", "symmetrical": "true"},
+            duplication_alowed=duplication_alowed
+        )
+
+    def test_put_new_constraint_to_constraint_section(self):
+        self.create()
+        self.env.push_cib.assert_called_once_with(self.cib)
+        self.independent_cib.find(".//constraints").append(etree.XML("""
+            <rsc_some id="some_id" symmetrical="true">
+                  <resource_set id="pcs_rsc_set_A_B" role="Master">
+                      <resource_ref id="A"></resource_ref>
+                      <resource_ref id="B"></resource_ref>
+                  </resource_set>
+                  <resource_set action="start" id="pcs_rsc_set_E_F">
+                      <resource_ref id="E"></resource_ref>
+                      <resource_ref id="F"></resource_ref>
+                  </resource_set>
+            </rsc_some>
+        """))
+        assert_xml_equal(
+            etree.tostring(self.independent_cib).decode(),
+            etree.tostring(self.cib).decode()
+        )
+
+    def test_refuse_duplicate(self):
+        self.create()
+        self.env.push_cib.assert_called_once_with(self.cib)
+        # the second identical create must fail with a forcible error
+        assert_raise_library_error(self.create, (
+            severities.ERROR,
+            report_codes.DUPLICATE_CONSTRAINTS_EXIST,
+            {
+                'constraint_type': 'rsc_some',
+                'constraint_info_list': [{
+                    'options': {'symmetrical': 'true', 'id': 'some_id'},
+                    'resource_sets': [
+                        {
+                            'ids': ['A', 'B'],
+                            'options':{'role':'Master', 'id':'pcs_rsc_set_A_B'}
+                        },
+                        {
+                            'ids': ['E', 'F'],
+                            'options':{'action':'start', 'id':'pcs_rsc_set_E_F'}
+                        }
+                    ],
+                }]
+            },
+            report_codes.FORCE_CONSTRAINT_DUPLICATE
+        ))
+
+    def test_put_duplicate_constraint_when_duplication_allowed(self):
+        self.create()
+        self.create(duplication_alowed=True)
+        expected_calls = [
+            mock.call(self.cib),
+            mock.call(self.cib),
+        ]
+        self.assertEqual(self.env.push_cib.call_count, len(expected_calls))
+        self.env.push_cib.assert_has_calls(expected_calls)
+
+        # the duplicate gets "-1" suffixed resource set ids (find_unique_id)
+        constraint_section = self.independent_cib.find(".//constraints")
+        constraint_section.append(etree.XML("""
+            <rsc_some id="some_id" symmetrical="true">
+                <resource_set id="pcs_rsc_set_A_B" role="Master">
+                    <resource_ref id="A"></resource_ref>
+                    <resource_ref id="B"></resource_ref>
+                </resource_set>
+                <resource_set action="start" id="pcs_rsc_set_E_F">
+                    <resource_ref id="E"></resource_ref>
+                    <resource_ref id="F"></resource_ref>
+                </resource_set>
+            </rsc_some>
+        """))
+        constraint_section.append(etree.XML("""
+            <rsc_some id="some_id" symmetrical="true">
+                <resource_set id="pcs_rsc_set_A_B-1" role="Master">
+                    <resource_ref id="A"></resource_ref>
+                    <resource_ref id="B"></resource_ref>
+                </resource_set>
+                <resource_set action="start" id="pcs_rsc_set_E_F-1">
+                    <resource_ref id="E"></resource_ref>
+                    <resource_ref id="F"></resource_ref>
+                </resource_set>
+            </rsc_some>
+        """))
+        assert_xml_equal(
+            etree.tostring(self.independent_cib).decode(),
+            etree.tostring(self.cib).decode()
+        )
+
+class ShowTest(TestCase):
+    def setUp(self):
+        self.cib, self.constraint_section = fixture_cib_and_constraints()
+        self.env = fixture_env(self.cib)
+
+    def create(self, tag_name, resource_set_list):
+        constraint.create_with_set(
+            tag_name,
+            lambda cib, options, resource_set_list: options,
+            self.env,
+            resource_set_list,
+            {"id":"some_id", "symmetrical": "true"},
+        )
+
+    def test_returns_export_of_found_elements(self):
+        tag_name = "rsc_some"
+        self.create(tag_name, [
+            {"ids": ["A", "B"], "options": {"role": "Master"}},
+        ])
+        self.create(tag_name, [
+            {"ids": ["E", "F"], "options": {"action": "start"}},
+        ])
+        etree.SubElement(self.constraint_section, tag_name).attrib.update({
+            "id": "plain1", "is_plain": "true"
+        })
+
+        is_plain = lambda element: element.attrib.has_key("is_plain")
+
+        self.assertEqual(
+            constraint.show(tag_name, is_plain, self.env), {
+            'plain': [{"options": {'id': 'plain1', 'is_plain': 'true'}}],
+            'with_resource_sets': [
+                {
+                    'resource_sets': [{
+                        'ids': ['A', 'B'],
+                        'options': {'role': 'Master', 'id': 'pcs_rsc_set_A_B'},
+                    }],
+                    'options': {'symmetrical': 'true', 'id': 'some_id'}
+                },
+                {
+                    'options': {'symmetrical': 'true', 'id': 'some_id'},
+                    'resource_sets': [{
+                        'ids': ['E', 'F'],
+                        'options': {'action': 'start', 'id': 'pcs_rsc_set_E_F'}
+                    }]
+                }
+            ]
+        })
diff --git a/pcs/lib/commands/test/test_ticket.py b/pcs/lib/commands/test/test_ticket.py
new file mode 100644
index 0000000..a22a014
--- /dev/null
+++ b/pcs/lib/commands/test/test_ticket.py
@@ -0,0 +1,76 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import logging
+from unittest import TestCase
+
+from pcs.common import report_codes
+from pcs.lib.commands.constraint import ticket as ticket_command
+from pcs.lib.env import LibraryEnvironment as Env
+from pcs.lib.errors import ReportItemSeverity as severities
+from pcs.test.tools.assertions import (
+    assert_xml_equal,
+    assert_raise_library_error
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
+
+
+class CreateTest(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.create_cib = get_xml_manipulation_creator_from_file(
+            rc("cib-empty.xml")
+        )
+
+    def test_success_create(self):
+        resource_xml = '<primitive id="resourceA" class="service" type="exim"/>'
+        cib = (
+            self.create_cib()
+                .append_to_first_tag_name('resources', resource_xml)
+        )
+
+        env = Env(self.mock_logger, self.mock_reporter, cib_data=str(cib))
+        ticket_command.create(env, "ticketA", "resourceA", {
+            "loss-policy": "fence",
+            "rsc-role": "master"
+        })
+
+        assert_xml_equal(
+            env.get_cib_xml(),
+            str(cib.append_to_first_tag_name(
+                'constraints', """
+                    <rsc_ticket
+                        id="ticket-ticketA-resourceA-Master"
+                        rsc="resourceA"
+                        rsc-role="Master"
+                        ticket="ticketA"
+                        loss-policy="fence"
+                    />
+                """
+            ))
+        )
+
+    def test_refuse_for_nonexisting_resource(self):
+        env = Env(
+            self.mock_logger,
+            self.mock_reporter,
+            cib_data=str(self.create_cib())
+        )
+        assert_raise_library_error(
+            lambda: ticket_command.create(
+                env, "ticketA", "resourceA", "master", {"loss-policy": "fence"}
+            ),
+            (
+                severities.ERROR,
+                report_codes.RESOURCE_DOES_NOT_EXIST,
+                {"resource_id": "resourceA"},
+            ),
+        )
diff --git a/pcs/lib/corosync/__init__.py b/pcs/lib/corosync/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/corosync/config_facade.py b/pcs/lib/corosync/config_facade.py
new file mode 100644
index 0000000..ff8d33b
--- /dev/null
+++ b/pcs/lib/corosync/config_facade.py
@@ -0,0 +1,501 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.common import report_codes
+from pcs.lib import reports
+from pcs.lib.errors import ReportItemSeverity, LibraryError
+from pcs.lib.corosync import config_parser
+from pcs.lib.node import NodeAddresses, NodeAddressesList
+
+class ConfigFacade(object):
+    """
+    Provides high level access to a corosync config file
+    """
+
+    QUORUM_OPTIONS = (
+        "auto_tie_breaker",
+        "last_man_standing",
+        "last_man_standing_window",
+        "wait_for_all",
+    )
+
+    @classmethod
+    def from_string(cls, config_string):
+        """
+        Parse corosync config and create a facade around it
+        config_string corosync config text
+        """
+        try:
+            return cls(config_parser.parse_string(config_string))
+        except config_parser.MissingClosingBraceException:
+            raise LibraryError(
+                reports.corosync_config_parser_missing_closing_brace()
+            )
+        except config_parser.UnexpectedClosingBraceException:
+            raise LibraryError(
+                reports.corosync_config_parser_unexpected_closing_brace()
+            )
+        except config_parser.CorosyncConfParserException:
+            raise LibraryError(
+                reports.corosync_config_parser_other_error()
+            )
+
+    def __init__(self, parsed_config):
+        """
+        Create a facade around a parsed corosync config file
+        parsed_config parsed corosync config
+        """
+        self._config = parsed_config
+        # set to True if changes cannot be applied on running cluster
+        self._need_stopped_cluster = False
+
+    @property
+    def config(self):
+        return self._config
+
+    @property
+    def need_stopped_cluster(self):
+        return self._need_stopped_cluster
+
+    def get_nodes(self):
+        """
+        Get all defined nodes
+        """
+        result = NodeAddressesList()
+        for nodelist in self.config.get_sections("nodelist"):
+            for node in nodelist.get_sections("node"):
+                node_data = {
+                    "ring0_addr": None,
+                    "ring1_addr": None,
+                    "name": None,
+                    "nodeid": None,
+                }
+                for attr_name, attr_value in node.get_attributes():
+                    if attr_name in node_data:
+                        node_data[attr_name] = attr_value
+                result.append(NodeAddresses(
+                    node_data["ring0_addr"],
+                    node_data["ring1_addr"],
+                    node_data["name"],
+                    node_data["nodeid"]
+                ))
+        return result
+
+    def set_quorum_options(self, report_processor, options):
+        """
+        Set options in quorum section
+        options quorum options dict
+        """
+        report_processor.process_list(
+            self.__validate_quorum_options(options)
+        )
+        quorum_section_list = self.__ensure_section(self.config, "quorum")
+        self.__set_section_options(quorum_section_list, options)
+        self.__update_two_node()
+        self.__remove_empty_sections(self.config)
+        self._need_stopped_cluster = True
+
+    def get_quorum_options(self):
+        """
+        Get configurable options from quorum section
+        """
+        options = {}
+        for section in self.config.get_sections("quorum"):
+            for name, value in section.get_attributes():
+                if name in self.__class__.QUORUM_OPTIONS:
+                    options[name] = value
+        return options
+
+    def __validate_quorum_options(self, options):
+        report_items = []
+        for name, value in sorted(options.items()):
+
+            allowed_names = self.__class__.QUORUM_OPTIONS
+            if name not in allowed_names:
+                report_items.append(
+                    reports.invalid_option(name, allowed_names, "quorum")
+                )
+                continue
+
+            if value == "":
+                continue
+
+            if name == "last_man_standing_window":
+                if not value.isdigit():
+                    report_items.append(reports.invalid_option_value(
+                        name, value, "positive integer"
+                    ))
+
+            else:
+                allowed_values = ("0", "1")
+                if value not in allowed_values:
+                    report_items.append(reports.invalid_option_value(
+                        name, value, allowed_values
+                    ))
+
+        return report_items
+
+    def has_quorum_device(self):
+        """
+        Check if quorum device is present in the config
+        """
+        for quorum in self.config.get_sections("quorum"):
+            for device in quorum.get_sections("device"):
+                if device.get_attributes("model"):
+                    return True
+        return False
+
+    def get_quorum_device_settings(self):
+        """
+        Get configurable options from quorum.device section
+        """
+        model = None
+        model_options = {}
+        generic_options = {}
+        for quorum in self.config.get_sections("quorum"):
+            for device in quorum.get_sections("device"):
+                for name, value in device.get_attributes():
+                    if name == "model":
+                        model = value
+                    else:
+                        generic_options[name] = value
+                for subsection in device.get_sections():
+                    if subsection.name not in model_options:
+                        model_options[subsection.name] = {}
+                    model_options[subsection.name].update(
+                        subsection.get_attributes()
+                    )
+        return model, model_options.get(model, {}), generic_options
+
+    def add_quorum_device(
+        self, report_processor, model, model_options, generic_options,
+        force_model=False, force_options=False,
+    ):
+        """
+        Add quorum device configuration
+        model quorum device model
+        model_options model specific options dict
+        generic_options generic quorum device options dict
+        force_model continue even if the model is not valid
+        force_options continue even if options are not valid
+        """
+        # validation
+        if self.has_quorum_device():
+            raise LibraryError(reports.qdevice_already_defined())
+        report_processor.process_list(
+            self.__validate_quorum_device_model(model, force_model)
+            +
+            self.__validate_quorum_device_model_options(
+                model,
+                model_options,
+                need_required=True,
+                force=force_options
+            )
+            +
+            self.__validate_quorum_device_generic_options(
+                generic_options,
+                force=force_options
+            )
+        )
+        # configuration cleanup
+        remove_need_stopped_cluster = {
+            "auto_tie_breaker": "",
+            "last_man_standing": "",
+            "last_man_standing_window": "",
+        }
+        need_stopped_cluster = False
+        quorum_section_list = self.__ensure_section(self.config, "quorum")
+        for quorum in quorum_section_list:
+            for device in quorum.get_sections("device"):
+                quorum.del_section(device)
+            for name, value in quorum.get_attributes():
+                if (
+                    name in remove_need_stopped_cluster
+                    and
+                    value not in ["", "0"]
+                ):
+                    need_stopped_cluster = True
+        attrs_to_remove = {
+            "allow_downscale": "",
+            "two_node": "",
+        }
+        attrs_to_remove.update(remove_need_stopped_cluster)
+        self.__set_section_options(quorum_section_list, attrs_to_remove)
+        # add new configuration
+        quorum = quorum_section_list[-1]
+        new_device = config_parser.Section("device")
+        quorum.add_section(new_device)
+        self.__set_section_options([new_device], generic_options)
+        new_device.set_attribute("model", model)
+        new_model = config_parser.Section(model)
+        self.__set_section_options([new_model], model_options)
+        new_device.add_section(new_model)
+        self.__update_two_node()
+        self.__remove_empty_sections(self.config)
+        # update_two_node sets self._need_stopped_cluster when changing an
+        # algorithm lms <-> 2nodelms. We don't care about that, it's not really
+        # a change, as there was no qdevice before. So we override it.
+        self._need_stopped_cluster = need_stopped_cluster
+
+    def update_quorum_device(
+        self, report_processor, model_options, generic_options,
+        force_options=False
+    ):
+        """
+        Update existing quorum device configuration
+        model_options model specific options dict
+        generic_options generic quorum device options dict
+        force_options continue even if options are not valid
+        """
+        # validation
+        if not self.has_quorum_device():
+            raise LibraryError(reports.qdevice_not_defined())
+        model = None
+        for quorum in self.config.get_sections("quorum"):
+            for device in quorum.get_sections("device"):
+                for dummy_name, value in device.get_attributes("model"):
+                    model = value
+        report_processor.process_list(
+            self.__validate_quorum_device_model_options(
+                model,
+                model_options,
+                need_required=False,
+                force=force_options
+            )
+            +
+            self.__validate_quorum_device_generic_options(
+                generic_options,
+                force=force_options
+            )
+        )
+        # set new configuration
+        device_sections = []
+        model_sections = []
+        for quorum in self.config.get_sections("quorum"):
+            device_sections.extend(quorum.get_sections("device"))
+            for device in quorum.get_sections("device"):
+                model_sections.extend(device.get_sections(model))
+        self.__set_section_options(device_sections, generic_options)
+        self.__set_section_options(model_sections, model_options)
+        self.__update_two_node()
+        self.__remove_empty_sections(self.config)
+        self._need_stopped_cluster = True
+
+    def remove_quorum_device(self):
+        """
+        Remove all quorum device configuration
+        """
+        if not self.has_quorum_device():
+            raise LibraryError(reports.qdevice_not_defined())
+        for quorum in self.config.get_sections("quorum"):
+            for device in quorum.get_sections("device"):
+                quorum.del_section(device)
+        self.__update_two_node()
+        self.__remove_empty_sections(self.config)
+
+    def __validate_quorum_device_model(self, model, force_model=False):
+        report_items = []
+
+        allowed_values = (
+            "net",
+        )
+        if model not in allowed_values:
+            report_items.append(reports.invalid_option_value(
+                "model",
+                model,
+                allowed_values,
+                ReportItemSeverity.WARNING if force_model
+                    else ReportItemSeverity.ERROR,
+                None if force_model else report_codes.FORCE_QDEVICE_MODEL
+            ))
+
+        return report_items
+
+    def __validate_quorum_device_model_options(
+        self, model, model_options, need_required, force=False
+    ):
+        if model == "net":
+            return self.__validate_quorum_device_model_net_options(
+                model_options,
+                need_required,
+                force
+            )
+        return []
+
+    def __validate_quorum_device_model_net_options(
+        self, model_options, need_required, force=False
+    ):
+        required_options = frozenset(["host"])
+        optional_options = frozenset([
+            "algorithm",
+            "connect_timeout",
+            "force_ip_version",
+            "port",
+            "tie_breaker",
+        ])
+        allowed_options = required_options | optional_options
+        model_options_names = frozenset(model_options.keys())
+        report_items = []
+        severity = (
+            ReportItemSeverity.WARNING if force else ReportItemSeverity.ERROR
+        )
+        forceable = None if force else report_codes.FORCE_OPTIONS
+
+        if need_required:
+            for missing in sorted(required_options - model_options_names):
+                report_items.append(reports.required_option_is_missing(missing))
+
+        for name, value in sorted(model_options.items()):
+            if name not in allowed_options:
+                report_items.append(reports.invalid_option(
+                    name,
+                    allowed_options,
+                    "quorum device model",
+                    severity,
+                    forceable
+                ))
+                continue
+
+            if value == "":
+                # do not allow to remove required options
+                if name in required_options:
+                    report_items.append(
+                        reports.required_option_is_missing(name)
+                    )
+                else:
+                    continue
+
+            if name == "algorithm":
+                allowed_values = ("2nodelms", "ffsplit", "lms")
+                if value not in allowed_values:
+                    report_items.append(reports.invalid_option_value(
+                        name, value, allowed_values, severity, forceable
+                    ))
+
+            if name == "connect_timeout":
+                minimum, maximum = 1000, 2*60*1000
+                if not (value.isdigit() and minimum <= int(value) <= maximum):
+                    min_max = "{min}-{max}".format(min=minimum, max=maximum)
+                    report_items.append(reports.invalid_option_value(
+                        name, value, min_max, severity, forceable
+                    ))
+
+            if name == "force_ip_version":
+                allowed_values = ("0", "4", "6")
+                if value not in allowed_values:
+                    report_items.append(reports.invalid_option_value(
+                        name, value, allowed_values, severity, forceable
+                    ))
+
+            if name == "port":
+                minimum, maximum = 1, 65535
+                if not (value.isdigit() and minimum <= int(value) <= maximum):
+                    min_max = "{min}-{max}".format(min=minimum, max=maximum)
+                    report_items.append(reports.invalid_option_value(
+                        name, value, min_max, severity, forceable
+                    ))
+
+            if name == "tie_breaker":
+                node_ids = [node.id for node in self.get_nodes()]
+                allowed_nonid = ["lowest", "highest"]
+                if value not in allowed_nonid + node_ids:
+                    allowed_values = allowed_nonid + ["valid node id"]
+                    report_items.append(reports.invalid_option_value(
+                        name, value, allowed_values, severity, forceable
+                    ))
+
+        return report_items
+
+    def __validate_quorum_device_generic_options(
+        self, generic_options, force=False
+    ):
+        optional_options = frozenset([
+            "sync_timeout",
+            "timeout",
+        ])
+        allowed_options = optional_options
+        report_items = []
+        severity = (
+            ReportItemSeverity.WARNING if force else ReportItemSeverity.ERROR
+        )
+        forceable = None if force else report_codes.FORCE_OPTIONS
+
+        for name, value in sorted(generic_options.items()):
+            if name not in allowed_options:
+                # model is never allowed in generic options, it is passed
+                # in its own argument
+                report_items.append(reports.invalid_option(
+                    name,
+                    allowed_options,
+                    "quorum device",
+                    severity if name != "model" else ReportItemSeverity.ERROR,
+                    forceable if name != "model" else None
+                ))
+                continue
+
+            if value == "":
+                continue
+
+            if not value.isdigit():
+                report_items.append(reports.invalid_option_value(
+                    name, value, "positive integer", severity, forceable
+                ))
+
+        return report_items
+
+    def __update_two_node(self):
+        # get relevant status
+        has_quorum_device = self.has_quorum_device()
+        has_two_nodes = len(self.get_nodes()) == 2
+        auto_tie_breaker = False
+        for quorum in self.config.get_sections("quorum"):
+            for attr in quorum.get_attributes("auto_tie_breaker"):
+                auto_tie_breaker = attr[1] != "0"
+        # update two_node
+        if has_two_nodes and not auto_tie_breaker and not has_quorum_device:
+            quorum_section_list = self.__ensure_section(self.config, "quorum")
+            self.__set_section_options(quorum_section_list, {"two_node": "1"})
+        else:
+            for quorum in self.config.get_sections("quorum"):
+                quorum.del_attributes_by_name("two_node")
+        # update qdevice algorithm "lms" vs "2nodelms"
+        for quorum in self.config.get_sections("quorum"):
+            for device in quorum.get_sections("device"):
+                for net in device.get_sections("net"):
+                    algorithm = None
+                    for dummy_name, value in net.get_attributes("algorithm"):
+                        algorithm = value
+                    if algorithm == "lms" and has_two_nodes:
+                        net.set_attribute("algorithm", "2nodelms")
+                        self._need_stopped_cluster = True
+                    elif algorithm == "2nodelms" and not has_two_nodes:
+                        net.set_attribute("algorithm", "lms")
+                        self._need_stopped_cluster = True
+
+    def __set_section_options(self, section_list, options):
+        for section in section_list[:-1]:
+            for name in options:
+                section.del_attributes_by_name(name)
+        for name, value in sorted(options.items()):
+            if value == "":
+                section_list[-1].del_attributes_by_name(name)
+            else:
+                section_list[-1].set_attribute(name, value)
+
+    def __ensure_section(self, parent_section, section_name):
+        section_list = parent_section.get_sections(section_name)
+        if not section_list:
+            new_section = config_parser.Section(section_name)
+            parent_section.add_section(new_section)
+            section_list.append(new_section)
+        return section_list
+
+    def __remove_empty_sections(self, parent_section):
+        for section in parent_section.get_sections():
+            self.__remove_empty_sections(section)
+            if section.empty:
+                parent_section.del_section(section)
diff --git a/pcs/corosync_conf.py b/pcs/lib/corosync/config_parser.py
similarity index 85%
rename from pcs/corosync_conf.py
rename to pcs/lib/corosync/config_parser.py
index 1debc39..7604ba8 100644
--- a/pcs/corosync_conf.py
+++ b/pcs/lib/corosync/config_parser.py
@@ -1,7 +1,9 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 
 class Section(object):
@@ -20,6 +22,10 @@ class Section(object):
     def name(self):
         return self._name
 
+    @property
+    def empty(self):
+        return not self._attr_list and not self._section_list
+
     def export(self, indent="    "):
         lines = []
         for attr in self._attr_list:
@@ -126,28 +132,33 @@ def _parse_section(lines, section):
         if not current_line or current_line[0] == "#":
             continue
         if "{" in current_line:
-            section_name, junk = current_line.rsplit("{", 1)
+            section_name, dummy_junk = current_line.rsplit("{", 1)
             new_section = Section(section_name.strip())
             section.add_section(new_section)
             _parse_section(lines, new_section)
         elif "}" in current_line:
             if not section.parent:
-                raise ParseErrorException("Unexpected closing brace")
+                raise UnexpectedClosingBraceException()
             return
         elif ":" in current_line:
             section.add_attribute(
                 *[x.strip() for x in current_line.split(":", 1)]
             )
     if section.parent:
-        raise ParseErrorException("Missing closing brace")
+        raise MissingClosingBraceException()
 
 
-class CorosyncConfException(Exception):
+class CorosyncConfParserException(Exception):
     pass
 
-class CircularParentshipException(CorosyncConfException):
+class CircularParentshipException(CorosyncConfParserException):
     pass
 
-class ParseErrorException(CorosyncConfException):
+class ParseErrorException(CorosyncConfParserException):
     pass
 
+class MissingClosingBraceException(ParseErrorException):
+    pass
+
+class UnexpectedClosingBraceException(ParseErrorException):
+    pass
diff --git a/pcs/lib/corosync/live.py b/pcs/lib/corosync/live.py
new file mode 100644
index 0000000..2446a46
--- /dev/null
+++ b/pcs/lib/corosync/live.py
@@ -0,0 +1,49 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os.path
+
+from pcs import settings
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+from pcs.lib.external import NodeCommunicator
+
+def get_local_corosync_conf():
+    """
+    Read corosync.conf file from local machine
+    """
+    path = settings.corosync_conf_file
+    try:
+        return open(path).read()
+    except IOError as e:
+        raise LibraryError(reports.corosync_config_read_error(path, e.strerror))
+
+def set_remote_corosync_conf(node_communicator, node_addr, config_text):
+    """
+    Send corosync.conf to a node
+    node_addr instance of NodeAddresses
+    config_text corosync.conf text
+    """
+    dummy_response = node_communicator.call_node(
+        node_addr,
+        "remote/set_corosync_conf",
+        NodeCommunicator.format_data_dict({'corosync_conf': config_text})
+    )
+
+def reload_config(runner):
+    """
+    Ask corosync to reload its configuration
+    """
+    output, retval = runner.run([
+        os.path.join(settings.corosync_binaries, "corosync-cfgtool"),
+        "-R"
+    ])
+    if retval != 0 or "invalid option" in output:
+        raise LibraryError(
+            reports.corosync_config_reload_error(output.rstrip())
+        )
+
diff --git a/pcs/lib/env.py b/pcs/lib/env.py
new file mode 100644
index 0000000..99e3397
--- /dev/null
+++ b/pcs/lib/env.py
@@ -0,0 +1,168 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from lxml import etree
+
+from pcs.lib import reports
+from pcs.lib.external import (
+    is_cman_cluster,
+    CommandRunner,
+    NodeCommunicator,
+)
+from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
+from pcs.lib.corosync.live import (
+    get_local_corosync_conf,
+    reload_config as reload_corosync_config,
+)
+from pcs.lib.nodes_task import (
+    distribute_corosync_conf,
+    check_corosync_offline_on_nodes,
+)
+from pcs.lib.pacemaker import (
+    get_cib,
+    get_cib_xml,
+    replace_cib_configuration_xml,
+)
+
+
+class LibraryEnvironment(object):
+    # pylint: disable=too-many-instance-attributes
+
+    def __init__(
+        self,
+        logger,
+        report_processor,
+        user_login=None,
+        user_groups=None,
+        cib_data=None,
+        corosync_conf_data=None,
+        auth_tokens_getter=None,
+    ):
+        self._logger = logger
+        self._report_processor = report_processor
+        self._user_login = user_login
+        self._user_groups = [] if user_groups is None else user_groups
+        self._cib_data = cib_data
+        self._corosync_conf_data = corosync_conf_data
+        self._is_cman_cluster = None
+        # TODO tokens probably should not be inserted from outside, but we're
+        # postponing dealing with them, because it's not that easy to move
+        # related code currently - it's in pcsd
+        self._auth_tokens_getter = auth_tokens_getter
+        self._auth_tokens = None
+
+    @property
+    def logger(self):
+        return self._logger
+
+    @property
+    def report_processor(self):
+        return self._report_processor
+
+    @property
+    def user_login(self):
+        return self._user_login
+
+    @property
+    def user_groups(self):
+        return self._user_groups
+
+    @property
+    def is_cman_cluster(self):
+        if self._is_cman_cluster is None:
+            self._is_cman_cluster = is_cman_cluster(self.cmd_runner())
+        return self._is_cman_cluster
+
+    def get_cib_xml(self):
+        if self.is_cib_live:
+            return get_cib_xml(self.cmd_runner())
+        else:
+            return self._cib_data
+
+    def get_cib(self):
+        return get_cib(self.get_cib_xml())
+
+    def push_cib_xml(self, cib_data):
+        if self.is_cib_live:
+            replace_cib_configuration_xml(self.cmd_runner(), cib_data)
+        else:
+            self._cib_data = cib_data
+
+    def push_cib(self, cib):
+        #etree returns bytes: b'xml'
+        #python 3 removed .encode() from bytes
+        #run(...) calls subprocess.Popen.communicate which calls encode...
+        #so here is bytes to str conversion
+        self.push_cib_xml(etree.tostring(cib).decode())
+
+    @property
+    def is_cib_live(self):
+        return self._cib_data is None
+
+    def get_corosync_conf_data(self):
+        if self._corosync_conf_data is None:
+            return get_local_corosync_conf()
+        else:
+            return self._corosync_conf_data
+
+    def get_corosync_conf(self):
+        return CorosyncConfigFacade.from_string(self.get_corosync_conf_data())
+
+    def push_corosync_conf(
+        self, corosync_conf_facade, skip_offline_nodes=False
+    ):
+        corosync_conf_data = corosync_conf_facade.config.export()
+        if self.is_corosync_conf_live:
+            node_list = corosync_conf_facade.get_nodes()
+            if corosync_conf_facade.need_stopped_cluster:
+                check_corosync_offline_on_nodes(
+                    self.node_communicator(),
+                    self.report_processor,
+                    node_list,
+                    skip_offline_nodes
+                )
+            distribute_corosync_conf(
+                self.node_communicator(),
+                self.report_processor,
+                node_list,
+                corosync_conf_data,
+                skip_offline_nodes
+            )
+            if not corosync_conf_facade.need_stopped_cluster:
+                reload_corosync_config(self.cmd_runner())
+                self.report_processor.process(
+                    reports.corosync_config_reloaded()
+                )
+        else:
+            self._corosync_conf_data = corosync_conf_data
+
+    @property
+    def is_corosync_conf_live(self):
+        return self._corosync_conf_data is None
+
+    def cmd_runner(self):
+        runner_env = dict()
+        if self.user_login:
+            runner_env["CIB_user"] = self.user_login
+        return CommandRunner(self.logger, self.report_processor, runner_env)
+
+    def node_communicator(self):
+        return NodeCommunicator(
+            self.logger,
+            self.report_processor,
+            self.__get_auth_tokens(),
+            self.user_login,
+            self.user_groups
+        )
+
+    def __get_auth_tokens(self):
+        if self._auth_tokens is None:
+            if self._auth_tokens_getter:
+                self._auth_tokens = self._auth_tokens_getter()
+            else:
+                self._auth_tokens = {}
+        return self._auth_tokens
diff --git a/pcs/errors.py b/pcs/lib/errors.py
similarity index 65%
rename from pcs/errors.py
rename to pcs/lib/errors.py
index 8c93cb0..c0bd3d1 100644
--- a/pcs/errors.py
+++ b/pcs/lib/errors.py
@@ -1,18 +1,18 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
-import usage
-import error_codes
-
-class CmdLineInputError(Exception):
+class LibraryError(Exception):
     pass
 
 class ReportItemSeverity(object):
     ERROR = 'ERROR'
     WARNING = 'WARNING'
     INFO = 'INFO'
+    DEBUG = 'DEBUG'
 
 class ReportItem(object):
     @classmethod
@@ -27,8 +27,12 @@ class ReportItem(object):
     def info(cls, code, message_pattern, **kwargs):
         return cls(code, ReportItemSeverity.INFO, message_pattern, **kwargs)
 
+    @classmethod
+    def debug(cls, code, message_pattern, **kwargs):
+        return cls(code, ReportItemSeverity.DEBUG, message_pattern, **kwargs)
+
     def __init__(
-        self, code, severity, message_pattern, forceable=False, info=None
+        self, code, severity, message_pattern, forceable=None, info=None
     ):
         self.code = code
         self.severity = severity
@@ -36,3 +40,6 @@ class ReportItem(object):
         self.message_pattern=message_pattern
         self.info = info if info else dict()
         self.message = self.message_pattern.format(**self.info)
+
+    def __repr__(self):
+        return self.code+": "+str(self.info)
diff --git a/pcs/lib/exchange_formats.md b/pcs/lib/exchange_formats.md
new file mode 100644
index 0000000..ebfc288
--- /dev/null
+++ b/pcs/lib/exchange_formats.md
@@ -0,0 +1,34 @@
+Exchange formats
+================
+The library exchanges data with the client in the formats described below.
+
+Resource set
+------------
+Dictionary with the keys "options" and "ids".
+Under the key "options" is a dictionary of resource set options.
+Under the key "ids" is a list of resource ids.
+```python
+{
+  "options": {"id": "id"},
+  "ids": ["resourceA", "resourceB"],
+}
+```
+
+Constraint
+----------
+When the constraint is plain (without resource sets), it is simply a dictionary
+of constraint options.
+```python
+{"id": "id", "rsc": "resourceA"}
+```
+
+When the constraint contains resource sets, it is a dictionary with the keys
+"resource_sets" and "options".
+Under the key "options" is a dictionary of constraint options.
+Under the key "resource_sets" are the resource sets (see Resource set).
+```python
+{
+  "options": {"id": "id"},
+  "resource_sets": {"options": {"id": "id"}, "ids": ["resourceA", "resourceB"]},
+}
+```
diff --git a/pcs/lib/external.py b/pcs/lib/external.py
new file mode 100644
index 0000000..231cd9c
--- /dev/null
+++ b/pcs/lib/external.py
@@ -0,0 +1,345 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import base64
+import inspect
+import json
+import os
+try:
+    # python 2
+    from pipes import quote as shell_quote
+except ImportError:
+    # python 3
+    from shlex import quote as shell_quote
+import re
+import signal
+import ssl
+import subprocess
+import sys
+try:
+    # python2
+    from urllib import urlencode as urllib_urlencode
+except ImportError:
+    # python3
+    from urllib.parse import urlencode as urllib_urlencode
+try:
+    # python2
+    from urllib2 import (
+        build_opener as urllib_build_opener,
+        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
+        HTTPSHandler as urllib_HTTPSHandler,
+        HTTPError as urllib_HTTPError,
+        URLError as urllib_URLError
+    )
+except ImportError:
+    # python3
+    from urllib.request import (
+        build_opener as urllib_build_opener,
+        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
+        HTTPSHandler as urllib_HTTPSHandler
+    )
+    from urllib.error import (
+        HTTPError as urllib_HTTPError,
+        URLError as urllib_URLError
+    )
+
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError, ReportItemSeverity
+from pcs import settings
+
+
def is_path_runnable(path):
    """Return True when path points to an existing, executable regular file."""
    path_is_file = os.path.isfile(path)
    return path_is_file and os.access(path, os.X_OK)
+
+
def is_cman_cluster(runner):
    """
    Detect if underlaying locally installed cluster is CMAN based
    """
    # Checking corosync version works in most cases and supports non-rhel
    # distributions as well as running (manually compiled) corosync2 on rhel6.
    # - corosync2 does not support cman at all
    # - corosync1 runs with cman on rhel6
    # - corosync1 can be used without cman, but we don't support it anyways
    # - corosync2 is the default result if errors occur
    corosync_binary = os.path.join(settings.corosync_binaries, "corosync")
    stdout, return_code = runner.run([corosync_binary, "-v"])
    if return_code != 0:
        return False
    version_match = re.search(r"version\D+(\d+)", stdout)
    return version_match is not None and version_match.group(1) == "1"
+
+
+class CommandRunner(object):
+    # Runs external commands while logging and reporting every invocation
+    # and its result.
+    def __init__(self, logger, reporter, env_vars=None):
+        self._logger = logger
+        self._reporter = reporter
+        # Environment variables applied to every command run by this instance.
+        self._env_vars = env_vars if env_vars else dict()
+        self._python2 = sys.version[0] == "2"
+
+    def run(
+        self, args, ignore_stderr=False, stdin_string=None, env_extend=None,
+        binary_output=False
+    ):
+        # Run a command, return (output, retval).
+        # args - command and its arguments as a list
+        # ignore_stderr - when True stderr is captured separately (PIPE) and
+        #     discarded instead of being merged into output
+        # stdin_string - text to feed to the process; None means no stdin
+        # env_extend - extra environment variables; instance-level env_vars
+        #     take precedence on conflict (see update() order below)
+        # binary_output - when True, on python3 output stays as bytes
+        env_vars = dict(env_extend) if env_extend else dict()
+        env_vars.update(self._env_vars)
+
+        log_args = " ".join([shell_quote(x) for x in args])
+        msg = "Running: {args}"
+        if stdin_string:
+            msg += "\n--Debug Input Start--\n{stdin}\n--Debug Input End--"
+        self._logger.debug(msg.format(args=log_args, stdin=stdin_string))
+        self._reporter.process(
+            reports.run_external_process_started(log_args, stdin_string)
+        )
+
+        try:
+            process = subprocess.Popen(
+                args,
+                # Some commands react differently if they get anything via stdin
+                stdin=(subprocess.PIPE if stdin_string is not None else None),
+                stdout=subprocess.PIPE,
+                stderr=(
+                    subprocess.PIPE if ignore_stderr else subprocess.STDOUT
+                ),
+                # restore default SIGPIPE handling in the child process
+                preexec_fn=(
+                    lambda: signal.signal(signal.SIGPIPE, signal.SIG_DFL)
+                ),
+                close_fds=True,
+                shell=False,
+                env=env_vars,
+                # decodes newlines and in python3 also converts bytes to str
+                universal_newlines=(not self._python2 and not binary_output)
+            )
+            output, dummy_stderror = process.communicate(stdin_string)
+            retval = process.returncode
+        except OSError as e:
+            # e.g. the executable does not exist or is not runnable
+            raise LibraryError(
+                reports.run_external_process_error(log_args, e.strerror)
+            )
+
+        self._logger.debug(
+            (
+                "Finished running: {args}\nReturn value: {retval}"
+                + "\n--Debug Output Start--\n{output}\n--Debug Output End--"
+            ).format(args=log_args, retval=retval, output=output)
+        )
+        self._reporter.process(
+            reports.run_external_process_finished(log_args, retval, output)
+        )
+        return output, retval
+
+
class NodeCommunicationException(Exception):
    """
    Base exception for failures when talking to a cluster node

    node -- address of the node the request was sent to
    command -- the requested command
    reason -- description of the failure
    """
    def __init__(self, node, command, reason):
        # Pass the context to the base class so that str(), repr(), e.args
        # and pickling of the exception work as expected (the original code
        # skipped this, leaving str(e) empty).
        super(NodeCommunicationException, self).__init__(node, command, reason)
        self.node = node
        self.command = command
        self.reason = reason
+
+
+class NodeConnectionException(NodeCommunicationException):
+    # Could not connect to the node at all (URLError).
+    pass
+
+
+class NodeAuthenticationException(NodeCommunicationException):
+    # The node responded with HTTP 401 - not authenticated.
+    pass
+
+
+class NodePermissionDeniedException(NodeCommunicationException):
+    # The node responded with HTTP 403 - authenticated but not allowed.
+    pass
+
+
+class NodeUnsupportedCommandException(NodeCommunicationException):
+    # The node responded with HTTP 404 - it does not know the command.
+    pass
+
+
def node_communicator_exception_to_report_item(
    e, severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    Transform NodeCommunicationException to ReportItem
    """
    report_builders = {
        NodeAuthenticationException:
            reports.node_communication_error_not_authorized,
        NodePermissionDeniedException:
            reports.node_communication_error_permission_denied,
        NodeUnsupportedCommandException:
            reports.node_communication_error_unsupported_command,
        NodeCommunicationException:
            reports.node_communication_error_other_error,
        NodeConnectionException:
            reports.node_communication_error_unable_to_connect,
    }
    # Exact class dispatch on purpose: each concrete exception type gets its
    # own report; anything unrecognized is re-raised unchanged.
    builder = report_builders.get(e.__class__)
    if builder is None:
        raise e
    return builder(e.node, e.command, e.reason, severity, forceable)
+
+class NodeCommunicator(object):
+    """
+    Sends requests to nodes
+    """
+
+    @classmethod
+    def format_data_dict(cls, data):
+        """
+        Encode data for transport (only plain dict is supported)
+        """
+        return urllib_urlencode(data)
+
+    @classmethod
+    def format_data_json(cls, data):
+        """
+        Encode data for transport (more complex data than in format_data_dict)
+        """
+        return json.dumps(data)
+
+    def __init__(self, logger, reporter, auth_tokens, user=None, groups=None):
+        """
+        auth_tokens authorization tokens for nodes: {node: token}
+        user username
+        groups groups the user is member of
+        """
+        self._logger = logger
+        self._reporter = reporter
+        self._auth_tokens = auth_tokens
+        self._user = user
+        self._groups = groups
+
+    def call_node(self, node_addr, request, data):
+        """
+        Send a request to a node
+        node_addr destination node, instance of NodeAddresses
+        request command to be run on the node
+        data command parameters, encoded by format_data_* method
+        """
+        return self.call_host(node_addr.ring0, request, data)
+
+    def call_host(self, host, request, data):
+        """
+        Send a request to a host
+        host host address
+        request command to be run on the host
+        data command parameters, encoded by format_data_* method
+        """
+        opener = self.__get_opener()
+        # wrap IPv6 addresses in brackets to form a valid URL
+        url = "https://{host}:2224/{request}".format(
+            host=("[{0}]".format(host) if ":" in host else host),
+            request=request
+        )
+        cookies = self.__prepare_cookies(host)
+        if cookies:
+            opener.addheaders.append(("Cookie", ";".join(cookies)))
+
+        msg = "Sending HTTP Request to: {url}"
+        if data:
+            msg += "\n--Debug Input Start--\n{data}\n--Debug Input End--"
+        self._logger.debug(msg.format(url=url, data=data))
+        self._reporter.process(
+            reports.node_communication_started(url, data)
+        )
+        result_msg = (
+            "Finished calling: {url}\nResponse Code: {code}"
+            + "\n--Debug Response Start--\n{response}\n--Debug Response End--"
+        )
+
+        try:
+            # python3 requires data to be bytes not str
+            if data:
+                data = data.encode("utf-8")
+            result = opener.open(url, data)
+            # python3 returns bytes not str
+            response_data = result.read().decode("utf-8")
+            self._logger.debug(result_msg.format(
+                url=url,
+                code=result.getcode(),
+                response=response_data
+            ))
+            self._reporter.process(
+                reports.node_communication_finished(
+                    url, result.getcode(), response_data
+                )
+            )
+            return response_data
+        except urllib_HTTPError as e:
+            # python3 returns bytes not str
+            response_data = e.read().decode("utf-8")
+            self._logger.debug(result_msg.format(
+                url=url,
+                code=e.code,
+                response=response_data
+            ))
+            self._reporter.process(
+                reports.node_communication_finished(url, e.code, response_data)
+            )
+            # 401/403/404 map to specific exceptions, anything else is generic
+            if e.code == 401:
+                raise NodeAuthenticationException(
+                    host, request, "HTTP error: {0}".format(e.code)
+                )
+            elif e.code == 403:
+                raise NodePermissionDeniedException(
+                    host, request, "HTTP error: {0}".format(e.code)
+                )
+            elif e.code == 404:
+                raise NodeUnsupportedCommandException(
+                    host, request, "HTTP error: {0}".format(e.code)
+                )
+            else:
+                raise NodeCommunicationException(
+                    host, request, "HTTP error: {0}".format(e.code)
+                )
+        except urllib_URLError as e:
+            msg = "Unable to connect to {node} ({reason})"
+            self._logger.debug(msg.format(node=host, reason=e.reason))
+            self._reporter.process(
+                reports.node_communication_not_connected(host, e.reason)
+            )
+            raise NodeConnectionException(host, request, e.reason)
+
+    def __get_opener(self):
+        # enable self-signed certificates
+        # https://www.python.org/dev/peps/pep-0476/
+        # http://bugs.python.org/issue21308
+        if (
+            hasattr(ssl, "_create_unverified_context")
+            and
+            "context" in inspect.getargspec(urllib_HTTPSHandler.__init__).args
+        ):
+            opener = urllib_build_opener(
+                urllib_HTTPSHandler(context=ssl._create_unverified_context()),
+                urllib_HTTPCookieProcessor()
+            )
+        else:
+            opener = urllib_build_opener(urllib_HTTPCookieProcessor())
+        return opener
+
+    def __prepare_cookies(self, host):
+        # Let's be safe about characters in variables (they can come from env)
+        # and do base64. We cannot do it for CIB_user however to be backward
+        # compatible so we at least remove disallowed characters.
+        cookies = []
+        if host in self._auth_tokens:
+            cookies.append("token={0}".format(self._auth_tokens[host]))
+        if self._user:
+            cookies.append("CIB_user={0}".format(
+                re.sub(r"[^!-~]", "", self._user).replace(";", "")
+            ))
+        if self._groups:
+            cookies.append("CIB_user_groups={0}".format(
+                # python3 requires the value to be bytes not str
+                # NOTE(review): b64encode returns bytes on python3, so format()
+                # renders the cookie value as b'...' - confirm the server side
+                # strips/tolerates this
+                base64.b64encode(" ".join(self._groups).encode("utf-8"))
+            ))
+        return cookies
diff --git a/pcs/lib/node.py b/pcs/lib/node.py
new file mode 100644
index 0000000..0ddd405
--- /dev/null
+++ b/pcs/lib/node.py
@@ -0,0 +1,40 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+
class NodeAddresses(object):
    """
    Network addresses and identity of a single cluster node

    ring0 -- primary address
    ring1 -- secondary (redundant ring) address, optional
    name -- node name, optional
    id -- node id, optional ("id" kept despite shadowing the builtin so the
        public interface stays unchanged)
    """
    def __init__(self, ring0, ring1=None, name=None, id=None):
        self.ring0 = ring0
        self.ring1 = ring1
        self.name = name
        self.id = id

    @property
    def label(self):
        """Human readable node identifier: the name if set, else ring0."""
        return self.name if self.name else self.ring0

    def __repr__(self):
        # Improvement: make instances self-describing in logs and debugging.
        return "NodeAddresses(ring0={0!r}, ring1={1!r}, name={2!r}, id={3!r})".format(
            self.ring0, self.ring1, self.name, self.id
        )
+
class NodeAddressesList(object):
    """
    A list-like container of NodeAddresses instances

    Supports len(), indexing, iteration, reversed() and append().
    """
    def __init__(self, node_addrs_list=None):
        # Idiomatic copy instead of an explicit append loop; copying also
        # ensures later changes to the source iterable do not affect us.
        self._list = list(node_addrs_list) if node_addrs_list else []

    def append(self, item):
        """Add a node to the end of the list."""
        self._list.append(item)

    def __len__(self):
        return len(self._list)

    def __getitem__(self, key):
        return self._list[key]

    def __iter__(self):
        return iter(self._list)

    def __reversed__(self):
        return reversed(self._list)
diff --git a/pcs/lib/nodes_task.py b/pcs/lib/nodes_task.py
new file mode 100644
index 0000000..d714d70
--- /dev/null
+++ b/pcs/lib/nodes_task.py
@@ -0,0 +1,117 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import json
+
+from pcs.common import report_codes
+from pcs.lib import reports
+from pcs.lib.errors import ReportItemSeverity
+from pcs.lib.external import (
+    NodeCommunicationException,
+    node_communicator_exception_to_report_item,
+)
+from pcs.lib.corosync import live as corosync_live
+
+
+def distribute_corosync_conf(
+    node_communicator, reporter, node_addr_list, config_text,
+    skip_offline_nodes=False
+):
+    """
+    Send corosync.conf to several cluster nodes
+    node_addr_list nodes to send config to (NodeAddressesList instance)
+    config_text text of corosync.conf
+    skip_offline_nodes don't raise an error if a node communication error occurs
+    """
+    # When offline nodes may be skipped, per-node failures are reported as
+    # warnings; otherwise they are errors which the user may force past.
+    failure_severity = ReportItemSeverity.ERROR
+    failure_forceable = report_codes.SKIP_OFFLINE_NODES
+    if skip_offline_nodes:
+        failure_severity = ReportItemSeverity.WARNING
+        failure_forceable = None
+
+    reporter.process(reports.corosync_config_distribution_started())
+    report_items = []
+    # TODO use parallel communication
+    for node in node_addr_list:
+        try:
+            corosync_live.set_remote_corosync_conf(
+                node_communicator,
+                node,
+                config_text
+            )
+            reporter.process(
+                reports.corosync_config_accepted_by_node(node.label)
+            )
+        except NodeCommunicationException as e:
+            report_items.append(
+                node_communicator_exception_to_report_item(
+                    e,
+                    failure_severity,
+                    failure_forceable
+                )
+            )
+            report_items.append(
+                reports.corosync_config_distribution_node_error(
+                    node.label,
+                    failure_severity,
+                    failure_forceable
+                )
+            )
+    # All per-node failures are collected and processed at once at the end.
+    reporter.process_list(report_items)
+
+def check_corosync_offline_on_nodes(
+    node_communicator, reporter, node_addr_list, skip_offline_nodes=False
+):
+    """
+    Check corosync is not running on cluster nodes
+    node_addr_list nodes to send config to (NodeAddressesList instance)
+    skip_offline_nodes don't raise an error if a node communication error occurs
+    """
+    # When offline nodes may be skipped, per-node failures are reported as
+    # warnings; otherwise they are errors which the user may force past.
+    failure_severity = ReportItemSeverity.ERROR
+    failure_forceable = report_codes.SKIP_OFFLINE_NODES
+    if skip_offline_nodes:
+        failure_severity = ReportItemSeverity.WARNING
+        failure_forceable = None
+
+    reporter.process(reports.corosync_not_running_check_started())
+    report_items = []
+    # TODO use parallel communication
+    for node in node_addr_list:
+        try:
+            status = node_communicator.call_node(node, "remote/status", "")
+            if not json.loads(status)["corosync"]:
+                reporter.process(
+                    reports.corosync_not_running_on_node_ok(node.label)
+                )
+            else:
+                report_items.append(
+                    reports.corosync_running_on_node_fail(node.label)
+                )
+        except NodeCommunicationException as e:
+            report_items.append(
+                node_communicator_exception_to_report_item(
+                    e,
+                    failure_severity,
+                    failure_forceable
+                )
+            )
+            report_items.append(
+                reports.corosync_not_running_check_node_error(
+                    node.label,
+                    failure_severity,
+                    failure_forceable
+                )
+            )
+        except (ValueError, LookupError):
+            # the node's status response is not valid JSON or is missing the
+            # expected "corosync" key
+            report_items.append(
+                reports.corosync_not_running_check_node_error(
+                    node.label,
+                    failure_severity,
+                    failure_forceable
+                )
+            )
+    # All per-node failures are collected and processed at once at the end.
+    reporter.process_list(report_items)
diff --git a/pcs/lib/pacemaker.py b/pcs/lib/pacemaker.py
new file mode 100644
index 0000000..14745c5
--- /dev/null
+++ b/pcs/lib/pacemaker.py
@@ -0,0 +1,215 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os.path
+from lxml import etree
+
+from pcs import settings
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker_state import ClusterState
+
+
+# exit codes and limits used by the helpers below
+__EXITCODE_WAIT_TIMEOUT = 62
+__EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT = 6
+__RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD = 100
+
+class CrmMonErrorException(LibraryError):
+    # Raised when the cluster status cannot be loaded via crm_mon.
+    pass
+
+# syntactic sugar for getting a full path to a pacemaker executable
+def __exec(name):
+    return os.path.join(settings.pacemaker_binaries, name)
+
+def get_cluster_status_xml(runner):
+    # Return the cluster status xml as produced by crm_mon (including
+    # inactive resources).
+    output, retval = runner.run(
+        [__exec("crm_mon"), "--one-shot", "--as-xml", "--inactive"]
+    )
+    if retval != 0:
+        raise CrmMonErrorException(
+            reports.cluster_state_cannot_load(retval, output)
+        )
+    return output
+
+def get_cib_xml(runner, scope=None):
+    # Load the local CIB (or only its given scope/section) as an xml string.
+    command = [__exec("cibadmin"), "--local", "--query"]
+    if scope:
+        command.append("--scope={0}".format(scope))
+    output, retval = runner.run(command)
+    if retval != 0:
+        # distinguish "valid scope which is just not present" from other errors
+        if retval == __EXITCODE_CIB_SCOPE_VALID_BUT_NOT_PRESENT and scope:
+            raise LibraryError(
+                reports.cib_load_error_scope_missing(scope, retval, output)
+            )
+        else:
+            raise LibraryError(reports.cib_load_error(retval, output))
+    return output
+
+def get_cib(xml):
+    # Parse CIB xml into an etree element; raise LibraryError on invalid xml.
+    try:
+        return etree.fromstring(xml)
+    except (etree.XMLSyntaxError, etree.DocumentInvalid):
+        raise LibraryError(reports.cib_load_error_invalid_format())
+
+def replace_cib_configuration_xml(runner, xml):
+    # Replace the configuration section of the CIB with the given xml string,
+    # fed to cibadmin via stdin.
+    output, retval = runner.run(
+        [
+            __exec("cibadmin"),
+            "--replace", "--scope", "configuration", "--verbose", "--xml-pipe"
+        ],
+        stdin_string=xml
+    )
+    if retval != 0:
+        raise LibraryError(reports.cib_push_error(retval, output))
+
+def replace_cib_configuration(runner, tree):
+    #etree returns bytes: b'xml'
+    #python 3 removed .encode() from bytes
+    #run(...) calls subprocess.Popen.communicate which calls encode...
+    #so here is bytes to str conversion
+    xml = etree.tostring(tree).decode()
+    return replace_cib_configuration_xml(runner, xml)
+
+def get_local_node_status(runner):
+    # Return a dict describing the local node's state; {"offline": True} when
+    # the cluster status cannot be loaded at all.
+    try:
+        cluster_status = ClusterState(get_cluster_status_xml(runner))
+    except CrmMonErrorException:
+        return {"offline": True}
+    node_name = __get_local_node_name(runner)
+    for node_status in cluster_status.node_section.nodes:
+        if node_status.attrs.name == node_name:
+            result = {
+                "offline": False,
+            }
+            for attr in (
+                'id', 'name', 'type', 'online', 'standby', 'standby_onfail',
+                'maintenance', 'pending', 'unclean', 'shutdown', 'expected_up',
+                'is_dc', 'resources_running',
+            ):
+                result[attr] = getattr(node_status.attrs, attr)
+            return result
+    raise LibraryError(reports.node_not_found(node_name))
+
+def resource_cleanup(runner, resource=None, node=None, force=False):
+    # Clean up resource failures. Without a specific resource or node this
+    # touches everything, so refuse very expensive cleanups unless forced.
+    if not force and not node and not resource:
+        summary = ClusterState(get_cluster_status_xml(runner)).summary
+        operations = summary.nodes.attrs.count * summary.resources.attrs.count
+        if operations > __RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD:
+            raise LibraryError(
+                reports.resource_cleanup_too_time_consuming(
+                    __RESOURCE_CLEANUP_OPERATION_COUNT_THRESHOLD
+                )
+            )
+
+    cmd = [__exec("crm_resource"), "--cleanup"]
+    if resource:
+        cmd.extend(["--resource", resource])
+    if node:
+        cmd.extend(["--node", node])
+
+    output, retval = runner.run(cmd)
+
+    if retval != 0:
+        raise LibraryError(
+            reports.resource_cleanup_error(retval, output, resource, node)
+        )
+    return output
+
+def nodes_standby(runner, node_list=None, all_nodes=False):
+    # Put the given nodes (or all nodes) into standby mode.
+    return __nodes_standby_unstandby(runner, True, node_list, all_nodes)
+
+def nodes_unstandby(runner, node_list=None, all_nodes=False):
+    # Take the given nodes (or all nodes) out of standby mode.
+    return __nodes_standby_unstandby(runner, False, node_list, all_nodes)
+
+def has_resource_wait_support(runner):
+    # returns 1 on success so we don't care about retval
+    output, dummy_retval = runner.run([__exec("crm_resource"), "-?"])
+    return "--wait" in output
+
+def ensure_resource_wait_support(runner):
+    # Raise when the installed crm_resource does not support --wait.
+    if not has_resource_wait_support(runner):
+        raise LibraryError(reports.resource_wait_not_supported())
+
+def wait_for_resources(runner, timeout=None):
+    # Block until the cluster settles (crm_resource --wait), optionally
+    # bounded by a timeout in seconds.
+    args = [__exec("crm_resource"), "--wait"]
+    if timeout is not None:
+        args.append("--timeout={0}".format(timeout))
+    output, retval = runner.run(args)
+    if retval != 0:
+        # distinguish a wait timeout from other failures
+        if retval == __EXITCODE_WAIT_TIMEOUT:
+            raise LibraryError(
+                reports.resource_wait_timed_out(retval, output.strip())
+            )
+        else:
+            raise LibraryError(
+                reports.resource_wait_error(retval, output.strip())
+            )
+
+def __nodes_standby_unstandby(
+    runner, standby=True, node_list=None, all_nodes=False
+):
+    # Shared implementation for nodes_standby / nodes_unstandby:
+    # standby=True puts nodes into standby, False takes them out of it.
+    if node_list or all_nodes:
+        # TODO once we switch to editing CIB instead of running crm_stanby, we
+        # cannot always relly on getClusterState. If we're not editing a CIB
+        # from a live cluster, there is no status.
+        state = ClusterState(get_cluster_status_xml(runner)).node_section.nodes
+        known_nodes = [node.attrs.name for node in state]
+
+        if all_nodes:
+            node_list = known_nodes
+        elif node_list:
+            # validate all requested node names before touching anything
+            report = []
+            for node in node_list:
+                if node not in known_nodes:
+                    report.append(reports.node_not_found(node))
+            if report:
+                raise LibraryError(*report)
+
+    # TODO Edit CIB directly instead of running commands for each node; be aware
+    # remote nodes might not be in the CIB yet so we need to put them there.
+    cmd_template = [__exec("crm_standby")]
+    cmd_template.extend(["-v", "on"] if standby else ["-D"])
+    cmd_list = []
+    if node_list:
+        for node in node_list:
+            cmd_list.append(cmd_template + ["-N", node])
+    else:
+        # no node given - run crm_standby without -N
+        cmd_list.append(cmd_template)
+    report = []
+    for cmd in cmd_list:
+        output, retval = runner.run(cmd)
+        if retval != 0:
+            report.append(reports.common_error(output))
+    if report:
+        raise LibraryError(*report)
+
+def __get_local_node_name(runner):
+    # It would be possible to run "crm_node --name" to get the name in one call,
+    # but it returns false names when cluster is not running (or we are on
+    # a remote node). Getting node id first is reliable since it fails in those
+    # cases.
+    output, retval = runner.run([__exec("crm_node"), "--cluster-id"])
+    if retval != 0:
+        raise LibraryError(
+            reports.pacemaker_local_node_name_not_found("node id not found")
+        )
+    node_id = output.strip()
+
+    output, retval = runner.run(
+        [__exec("crm_node"), "--name-for-id={0}".format(node_id)]
+    )
+    if retval != 0:
+        raise LibraryError(
+            reports.pacemaker_local_node_name_not_found("node name not found")
+        )
+    node_name = output.strip()
+
+    # crm_node prints "(null)" instead of failing when the name is unknown
+    if node_name == "(null)":
+        raise LibraryError(
+            reports.pacemaker_local_node_name_not_found("node name is null")
+        )
+    return node_name
diff --git a/pcs/lib/pacemaker_state.py b/pcs/lib/pacemaker_state.py
new file mode 100644
index 0000000..b413b90
--- /dev/null
+++ b/pcs/lib/pacemaker_state.py
@@ -0,0 +1,153 @@
+'''
+The intention is put there knowledge about cluster state structure.
+Hide information about underlaying xml is desired too.
+'''
+
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os.path
+
+from lxml import etree
+
+from pcs import settings
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker_values import is_true
+
+class _Attrs(object):
+    def __init__(self, owner_name, attrib, required_attrs):
+        '''
+        attrib lxml.etree._Attrib - wrapped attribute collection
+        required_attrs dict of required atribute names object_name:xml_attribute
+        '''
+        self.owner_name = owner_name
+        self.attrib = attrib
+        self.required_attrs = required_attrs
+
+    def __getattr__(self, name):
+        if name in self.required_attrs.keys():
+            try:
+                attr_specification = self.required_attrs[name]
+                if isinstance(attr_specification, tuple):
+                    # spec is (xml attribute name, transform function)
+                    attr_name, attr_transform = attr_specification
+                    return attr_transform(self.attrib[attr_name])
+                else:
+                    # spec is just the xml attribute name
+                    return self.attrib[attr_specification]
+            except KeyError:
+                # declared attribute is missing in the xml element
+                raise AttributeError(
+                    "Missing attribute '{0}' ('{1}' in source) in '{2}'"
+                    .format(name, self.required_attrs[name], self.owner_name)
+                )
+
+        raise AttributeError(
+            "'{0}' does not declare attribute '{1}'"
+            .format(self.owner_name, name)
+        )
+
+class _Children(object):
+    # Exposes child elements (as lists) and sections (as single wrapped
+    # elements) of a dom part, driven by declarative mappings.
+    def __init__(self, owner_name, dom_part, children, sections):
+        self.owner_name = owner_name
+        self.dom_part = dom_part
+        self.children = children
+        self.sections = sections
+
+    def __getattr__(self, name):
+        if name in self.children.keys():
+            element_name, wrapper = self.children[name]
+            return [
+                wrapper(element)
+                for element in self.dom_part.findall('.//' + element_name)
+            ]
+
+        if name in self.sections.keys():
+            element_name, wrapper = self.sections[name]
+            return wrapper(self.dom_part.findall('.//' + element_name)[0])
+
+        raise AttributeError(
+            "'{0}' does not declare child or section '{1}'"
+            .format(self.owner_name, name)
+        )
+
+class _Element(object):
+    # Base class for wrapped xml elements; subclasses declare required_attrs,
+    # children and sections to describe their part of the status xml.
+    required_attrs = {}
+    children = {}
+    sections = {}
+
+    def __init__(self, dom_part):
+        self.dom_part = dom_part
+        self.attrs = _Attrs(
+            self.__class__.__name__,
+            self.dom_part.attrib,
+            self.required_attrs
+        )
+        self.children_access = _Children(
+            self.__class__.__name__,
+            self.dom_part,
+            self.children,
+            self.sections,
+        )
+
+    def __getattr__(self, name):
+        # delegate unknown attribute access to declared children/sections
+        return getattr(self.children_access, name)
+
+class _SummaryNodes(_Element):
+    # nodes_configured element in the summary section
+    required_attrs = {
+        'count': ('number', int),
+    }
+
+class _SummaryResources(_Element):
+    # resources_configured element in the summary section
+    required_attrs = {
+        'count': ('number', int),
+    }
+
+class _SummarySection(_Element):
+    sections = {
+        'nodes': ('nodes_configured', _SummaryNodes),
+        'resources': ('resources_configured', _SummaryResources),
+    }
+
+class _Node(_Element):
+    # a single node element of the status xml
+    required_attrs = {
+        'id': 'id',
+        'name': 'name',
+        'type': 'type',
+        'online': ('online', is_true),
+        'standby': ('standby', is_true),
+        'standby_onfail': ('standby_onfail', is_true),
+        'maintenance': ('maintenance', is_true),
+        'pending': ('pending', is_true),
+        'unclean': ('unclean', is_true),
+        'shutdown': ('shutdown', is_true),
+        'expected_up': ('expected_up', is_true),
+        'is_dc': ('is_dc', is_true),
+        'resources_running': ('resources_running', int),
+    }
+
+class _NodeSection(_Element):
+    children = {
+        'nodes': ('node', _Node),
+    }
+
+def _get_valid_cluster_state_dom(xml):
+    # Parse the status xml; validate it against the crm_mon schema when the
+    # schema file is available on this system.
+    try:
+        dom = etree.fromstring(xml)
+        if os.path.isfile(settings.crm_mon_schema):
+            etree.RelaxNG(file=settings.crm_mon_schema).assertValid(dom)
+        return dom
+    except (etree.XMLSyntaxError, etree.DocumentInvalid):
+        raise LibraryError(reports.cluster_state_invalid_format())
+
+class ClusterState(_Element):
+    # Entry point: wraps the whole cluster status xml document.
+    sections = {
+        'summary': ('summary', _SummarySection),
+        'node_section': ('nodes', _NodeSection),
+    }
+
+    def __init__(self, xml):
+        self.dom = _get_valid_cluster_state_dom(xml)
+        super(ClusterState, self).__init__(self.dom)
diff --git a/pcs/lib/pacemaker_values.py b/pcs/lib/pacemaker_values.py
new file mode 100644
index 0000000..9ab6929
--- /dev/null
+++ b/pcs/lib/pacemaker_values.py
@@ -0,0 +1,100 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import re
+
+from pcs.lib import reports
+from pcs.lib.errors import LibraryError
+
+
# Values pacemaker treats as boolean true/false; see crm_is_true in
# pacemaker/lib/common/utils.c. Callers compare lowercased input against
# these lists.
__BOOLEAN_TRUE = ["true", "on", "yes", "y", "1"]
__BOOLEAN_FALSE = ["false", "off", "no", "n", "0"]
# pacemaker's special score value meaning "infinite"
SCORE_INFINITY = "INFINITY"
+
+
def is_true(val):
    """
    Return True if pacemaker considers val to be a true boolean value.

    See crm_is_true in pacemaker/lib/common/utils.c
    val -- checked value (string)
    """
    normalized = val.lower()
    return normalized in __BOOLEAN_TRUE
+
def is_boolean(val):
    """
    Return True if pacemaker considers val to be a boolean value (either
    true or false).

    See crm_is_true in pacemaker/lib/common/utils.c
    val -- checked value (string)
    """
    normalized = val.lower()
    return normalized in __BOOLEAN_TRUE or normalized in __BOOLEAN_FALSE
+
def timeout_to_seconds(timeout, return_unknown=False):
    """
    Transform a pacemaker style timeout string to a number of seconds.

    timeout -- timeout string, e.g. "10", "2min", "1h"
    return_unknown -- when the timeout is not valid, return the original
        string if True, None if False (default False)
    """
    # a bare number is already a number of seconds
    if timeout.isdigit():
        return int(timeout)
    # (suffix, seconds-per-unit) pairs accepted by pacemaker
    unit_seconds = (
        ("s", 1),
        ("sec", 1),
        ("m", 60),
        ("min", 60),
        ("h", 3600),
        ("hr", 3600),
    )
    for unit, seconds in unit_seconds:
        if timeout.endswith(unit):
            magnitude = timeout[:-len(unit)]
            if magnitude.isdigit():
                return int(magnitude) * seconds
    return timeout if return_unknown else None
+
def get_valid_timeout_seconds(timeout_candidate):
    """
    Transform a pacemaker style timeout to a number of seconds.

    Raise LibraryError when the timeout is not valid; pass None through
    unchanged.
    timeout_candidate -- timeout string or None
    """
    if timeout_candidate is None:
        return None
    seconds = timeout_to_seconds(timeout_candidate)
    if seconds is None:
        raise LibraryError(reports.invalid_timeout(timeout_candidate))
    return seconds
+
def validate_id(id_candidate, description="id"):
    """
    Validate a pacemaker id, raise LibraryError on invalid id.

    id_candidate -- id's value
    description -- id's role description (default "id")
    """
    # Allowed characters follow the NCName definition:
    # http://www.w3.org/TR/REC-xml-names/#NT-NCName
    # http://www.w3.org/TR/REC-xml/#NT-Name
    if not id_candidate:
        raise LibraryError(reports.invalid_id_is_empty(
            id_candidate, description
        ))
    # the first character is more restricted than the rest: no digits,
    # no "." or "-"
    if re.match("[a-zA-Z_]", id_candidate[0]) is None:
        raise LibraryError(reports.invalid_id_bad_char(
            id_candidate, description, id_candidate[0], True
        ))
    allowed_char = re.compile("[a-zA-Z0-9_.-]")
    for current_char in id_candidate[1:]:
        if allowed_char.match(current_char) is None:
            raise LibraryError(reports.invalid_id_bad_char(
                id_candidate, description, current_char, False
            ))
+
def is_score_value(value):
    """
    Return True if value is a valid pacemaker score: an optionally signed
    integer or INFINITY.

    value -- checked value (string)
    """
    if not value:
        return False
    # strip at most one leading sign; "--5" must stay invalid
    body = value[1:] if value[0] in ("+", "-") else value
    if body == SCORE_INFINITY:
        return True
    return body.isdigit()
diff --git a/pcs/lib/reports.py b/pcs/lib/reports.py
new file mode 100644
index 0000000..e54bce8
--- /dev/null
+++ b/pcs/lib/reports.py
@@ -0,0 +1,916 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from collections import Iterable
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItem, ReportItemSeverity
+
+
def common_error(text):
    # TODO replace by more specific reports
    """
    unspecified error with text message, do not use unless absolutely necessary
    text string error message
    """
    return ReportItem.error(
        report_codes.COMMON_ERROR,
        "{text}",
        info={"text": text}
    )

def common_info(text):
    # TODO replace by more specific reports
    """
    unspecified info with text message, do not use unless absolutely necessary
    text string info message
    """
    return ReportItem.info(
        report_codes.COMMON_INFO,
        "{text}",
        info={"text": text}
    )
+
def resource_for_constraint_is_multiinstance(
    resource_id, parent_type, parent_id,
    severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    when setting up a constraint a resource in a clone or a master was specified
    resource_id string specified resource
    parent_type string "clone" or "master"
    parent_id clone or master resource id
    severity report item severity
    forceable is this report item forceable? by what category?
    """
    # default wording covers clones; master/slave resources get their own text
    template = (
        "{resource_id} is a clone resource, you should use the"
        + " clone id: {parent_id} when adding constraints"
    )
    if parent_type == "master":
        template = (
            "{resource_id} is a master/slave resource, you should use the"
            + " master id: {parent_id} when adding constraints"
        )

    return ReportItem(
        report_codes.RESOURCE_FOR_CONSTRAINT_IS_MULTIINSTANCE,
        severity,
        template,
        info={
            "resource_id": resource_id,
            "parent_type": parent_type,
            "parent_id": parent_id,
        },
        forceable=forceable
    )

def duplicate_constraints_exist(
    constraint_type, constraint_info_list,
    severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    when creating a constraint it was detected the constraint already exists
    constraint_type string "rsc_colocation", "rsc_order", "rsc_ticket"
    constraint_info_list list of structured constraint data according to type
    severity report item severity
    forceable is this report item forceable? by what category?
    """
    return ReportItem(
        report_codes.DUPLICATE_CONSTRAINTS_EXIST,
        severity,
        "duplicate constraint already exists",
        info={
            "constraint_type": constraint_type,
            "constraint_info_list": constraint_info_list,
        },
        forceable=forceable
    )
+
def empty_resource_set_list():
    """
    an empty resource set has been specified, which is not allowed by cib schema
    """
    return ReportItem.error(
        report_codes.EMPTY_RESOURCE_SET_LIST,
        "Resource set list is empty",
    )

def required_option_is_missing(name):
    """
    required option has not been specified, command cannot continue
    name string name of the missing option
    """
    return ReportItem.error(
        report_codes.REQUIRED_OPTION_IS_MISSING,
        "required option '{option_name}' is missing",
        info={
            "option_name": name
        }
    )

def invalid_option(
    option_name, allowed_options, option_type,
    severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    specified option name is not valid, usually an error or a warning
    option_name specified invalid option name
    allowed_options iterable of possible allowed option names
    option_type describes the option
    severity report item severity
    forceable is this report item forceable? by what category?
    """
    msg = "invalid option '{option_name}', allowed options are: {allowed_str}"
    info = {
        "option_name": option_name,
        "option_type": option_type,
        "allowed": sorted(allowed_options),
        "allowed_str": ", ".join(sorted(allowed_options)),
    }
    if option_type:
        msg = ("invalid {option_type} option '{option_name}'"
            + ", allowed options are: {allowed_str}")
    # NOTE(review): forceable and info are passed positionally here, while
    # every sibling report passes them as keywords (info=..., forceable=...).
    # Verify the positional order matches ReportItem.__init__.
    return ReportItem(
        report_codes.INVALID_OPTION, severity, msg, forceable, info
    )
+
def invalid_option_value(
    option_name, option_value, allowed_values,
    severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    specified value is not valid for the option, usually an error or a warning
    option_name specified option name whose value is not valid
    option_value specified value which is not valid
    allowed_values list of allowed values or string description
    severity report item severity
    forceable is this report item forceable? by what category?
    """
    # "".__class__ is used instead of str so text strings are recognized both
    # on python 2 (unicode, due to unicode_literals) and on python 3 (str);
    # a plain string description must not be joined character by character
    allowed_iterable = (
        isinstance(allowed_values, Iterable)
        and
        not isinstance(allowed_values, "".__class__)
    )
    allowed_str = (", ".join(allowed_values) if allowed_iterable
        else allowed_values)
    return ReportItem(
        report_codes.INVALID_OPTION_VALUE,
        severity,
        "'{option_value}' is not a valid {option_name} value"
            + ", use {allowed_values_str}",
        info={
            "option_value": option_value,
            "option_name": option_name,
            "allowed_values": allowed_values,
            "allowed_values_str": allowed_str,
        },
        forceable=forceable
    )

def invalid_id_is_empty(id, id_description):
    """
    empty string was specified as an id, which is not valid
    id string specified id
    id_description string describe id's role
    """
    return ReportItem.error(
        report_codes.INVALID_ID,
        "{id_description} cannot be empty",
        info={
            "id": id,
            "id_description": id_description,
            "reason": "empty",
        }
    )

def invalid_id_bad_char(id, id_description, bad_char, is_first_char):
    """
    specified id is not valid as it contains a forbidden character
    id string specified id
    id_description string describe id's role
    bad_char forbidden character
    is_first_char is it the first character which is forbidden?
    """
    # str.format only injects " first "/" " here; the doubled braces keep
    # the {id_description} etc. placeholders intact for later formatting
    return ReportItem.error(
        report_codes.INVALID_ID,
        (
            "invalid {{id_description}} '{{id}}', '{{invalid_character}}' "
            + "is not a valid{0}character for a {{id_description}}"
        ).format(" first " if is_first_char else " "),
        info={
            "id": id,
            "id_description": id_description,
            "reason": "invalid{0}character".format(
                " first " if is_first_char else " "
            ),
            "invalid_character": bad_char,
        }
    )
+
def invalid_timeout(timeout):
    """
    specified timeout is not valid (number or other format e.g. 2min)
    timeout string specified invalid timeout
    """
    return ReportItem.error(
        report_codes.INVALID_TIMEOUT_VALUE,
        "'{timeout}' is not a valid number of seconds to wait",
        info={"timeout": timeout}
    )

def invalid_score(score):
    """
    specified score value is not valid
    score specified score value
    """
    return ReportItem.error(
        report_codes.INVALID_SCORE,
        "invalid score '{score}', use integer or INFINITY or -INFINITY",
        info={
            "score": score,
        }
    )

def multiple_score_options():
    """
    more than one of mutually exclusive score options has been set
    (score, score-attribute, score-attribute-mangle in rules or colocation sets)
    """
    return ReportItem.error(
        report_codes.MULTIPLE_SCORE_OPTIONS,
        "you cannot specify multiple score options",
    )

def run_external_process_started(command, stdin):
    """
    information about running an external process
    command string the external process command
    stdin string passed to the external process via its stdin
    """
    # stdin is only echoed into the message when it is non-empty
    msg = "Running: {command}"
    if stdin:
        msg += "\n--Debug Input Start--\n{stdin}\n--Debug Input End--"
    msg += "\n"
    return ReportItem.debug(
        report_codes.RUN_EXTERNAL_PROCESS_STARTED,
        msg,
        info={
            "command": command,
            "stdin": stdin,
        }
    )

def run_external_process_finished(command, retval, stdout):
    """
    information about result of running an external process
    command string the external process command
    retval external process's return (exit) code
    stdout string external process's stdout
    """
    return ReportItem.debug(
        report_codes.RUN_EXTERNAL_PROCESS_FINISHED,
        "Finished running: {command}\nReturn value: {return_value}"
        + "\n--Debug Output Start--\n{stdout}\n--Debug Output End--\n",
        info={
            "command": command,
            "return_value": retval,
            "stdout": stdout,
        }
    )

def run_external_process_error(command, reason):
    """
    attempt to run an external process failed
    command string the external process command
    reason string error description
    """
    return ReportItem.error(
        report_codes.RUN_EXTERNAL_PROCESS_ERROR,
        "unable to run command {command}: {reason}",
        info={
            "command": command,
            "reason": reason
        }
    )
+
def node_communication_started(target, data):
    """
    request is about to be sent to a remote node, debug info
    target string where the request is about to be sent to
    data string request's data
    """
    # data is only echoed into the message when it is non-empty
    msg = "Sending HTTP Request to: {target}"
    if data:
        msg += "\n--Debug Input Start--\n{data}\n--Debug Input End--"
    msg += "\n"
    return ReportItem.debug(
        report_codes.NODE_COMMUNICATION_STARTED,
        msg,
        info={
            "target": target,
            "data": data,
        }
    )

def node_communication_finished(target, retval, data):
    """
    remote node request has been finished, debug info
    target string where the request was sent to
    retval response return code
    data response data
    """
    return ReportItem.debug(
        report_codes.NODE_COMMUNICATION_FINISHED,
        "Finished calling: {target}\nResponse Code: {response_code}"
        + "\n--Debug Response Start--\n{response_data}\n--Debug Response End--"
        + "\n",
        info={
            "target": target,
            "response_code": retval,
            "response_data": data
        }
    )

def node_communication_not_connected(node, reason):
    """
    an error occurred when connecting to a remote node, debug info
    node string node address / name
    reason string description of the error
    """
    return ReportItem.debug(
        report_codes.NODE_COMMUNICATION_NOT_CONNECTED,
        "Unable to connect to {node} ({reason})",
        info={
            "node": node,
            "reason": reason,
        }
    )

def node_communication_error_not_authorized(
    node, command, reason,
    severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    node rejected a request as we are not authorized
    node string node address / name
    command string command the request was for
    reason string description of the error
    severity report item severity
    forceable is this report item forceable? by what category?
    """
    return ReportItem(
        report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
        severity,
        "Unable to authenticate to {node} ({reason}), try running 'pcs cluster auth'",
        info={
            "node": node,
            "command": command,
            "reason": reason,
        },
        forceable=forceable
    )

def node_communication_error_permission_denied(
    node, command, reason,
    severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    node rejected a request as we do not have permissions to run the request
    node string node address / name
    command string command the request was for
    reason string description of the error
    severity report item severity
    forceable is this report item forceable? by what category?
    """
    return ReportItem(
        report_codes.NODE_COMMUNICATION_ERROR_PERMISSION_DENIED,
        severity,
        "{node}: Permission denied ({reason})",
        info={
            "node": node,
            "command": command,
            "reason": reason,
        },
        forceable=forceable
    )
+
def node_communication_error_unsupported_command(
    node, command, reason,
    severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    node rejected a request as it does not support the request
    node string node address / name
    command string command the request was for
    reason string description of the error
    severity report item severity
    forceable is this report item forceable? by what category?
    """
    return ReportItem(
        report_codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND,
        severity,
        "{node}: Unsupported command ({reason}), try upgrading pcsd",
        info={
            "node": node,
            "command": command,
            "reason": reason,
        },
        forceable=forceable
    )

def node_communication_error_other_error(
    node, command, reason,
    severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    node rejected a request for another reason (may be faulty node)
    node string node address / name
    command string command the request was for
    reason string description of the error
    severity report item severity
    forceable is this report item forceable? by what category?
    """
    return ReportItem(
        report_codes.NODE_COMMUNICATION_ERROR,
        severity,
        "Error connecting to {node} ({reason})",
        info={
            "node": node,
            "command": command,
            "reason": reason,
        },
        forceable=forceable
    )

def node_communication_error_unable_to_connect(
    node, command, reason,
    severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    we were unable to connect to a node
    node string node address / name
    command string command the request was for
    reason string description of the error
    severity report item severity
    forceable is this report item forceable? by what category?
    """
    return ReportItem(
        report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
        severity,
        "Unable to connect to {node} ({reason})",
        info={
            "node": node,
            "command": command,
            "reason": reason,
        },
        forceable=forceable
    )
+
def corosync_config_distribution_started():
    """
    corosync configuration is about to be sent to nodes
    """
    return ReportItem.info(
        report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED,
        "Sending updated corosync.conf to nodes..."
    )

def corosync_config_accepted_by_node(node):
    """
    corosync configuration has been accepted by a node
    node string node address / name
    """
    return ReportItem.info(
        report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
        "{node}: Succeeded",
        info={"node": node}
    )

def corosync_config_distribution_node_error(
    node,
    severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    communication error occurred when saving corosync configuration to a node
    node string faulty node address / name
    severity report item severity
    forceable is this report item forceable? by what category?
    """
    return ReportItem(
        report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
        severity,
        "{node}: Unable to set corosync config",
        info={"node": node},
        forceable=forceable
    )

def corosync_not_running_check_started():
    """
    we are about to make sure corosync is not running on nodes
    """
    return ReportItem.info(
        report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED,
        "Checking corosync is not running on nodes..."
    )

def corosync_not_running_check_node_error(
    node,
    severity=ReportItemSeverity.ERROR, forceable=None
):
    """
    communication error occurred when checking corosync is not running on a node
    node string faulty node address / name
    severity report item severity
    forceable is this report item forceable? by what category?
    """
    return ReportItem(
        report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
        severity,
        "{node}: Unable to check if corosync is not running",
        info={"node": node},
        forceable=forceable
    )

def corosync_not_running_on_node_ok(node):
    """
    corosync is not running on a node, which is ok
    node string node address / name
    """
    return ReportItem.info(
        report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
        "{node}: corosync is not running",
        info={"node": node}
    )

def corosync_running_on_node_fail(node):
    """
    corosync is running on a node, which is not ok
    node string node address / name
    """
    return ReportItem.error(
        report_codes.COROSYNC_RUNNING_ON_NODE,
        "{node}: corosync is running",
        info={"node": node}
    )

def corosync_config_reloaded():
    """
    corosync configuration has been reloaded
    """
    return ReportItem.info(
        report_codes.COROSYNC_CONFIG_RELOADED,
        "Corosync configuration reloaded"
    )
+
def corosync_config_reload_error(reason):
    """
    an error occurred when reloading corosync configuration
    reason string an error message
    """
    return ReportItem.error(
        report_codes.COROSYNC_CONFIG_RELOAD_ERROR,
        "Unable to reload corosync configuration: {reason}",
        info={"reason": reason}
    )

def corosync_config_read_error(path, reason):
    """
    an error occurred when reading corosync configuration file from disk
    path string path of the configuration file
    reason string an error message
    """
    return ReportItem.error(
        report_codes.UNABLE_TO_READ_COROSYNC_CONFIG,
        "Unable to read {path}: {reason}",
        info={
            "path": path,
            "reason": reason,
        }
    )

def corosync_config_parser_missing_closing_brace():
    """
    corosync config cannot be parsed due to missing closing brace
    """
    return ReportItem.error(
        report_codes.PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE,
        "Unable to parse corosync config: missing closing brace"
    )

def corosync_config_parser_unexpected_closing_brace():
    """
    corosync config cannot be parsed due to unexpected closing brace
    """
    return ReportItem.error(
        report_codes.PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE,
        "Unable to parse corosync config: unexpected closing brace"
    )

def corosync_config_parser_other_error():
    """
    corosync config cannot be parsed, the cause is not specified
    It is better to use more specific error if possible.
    """
    return ReportItem.error(
        report_codes.PARSE_ERROR_COROSYNC_CONF,
        "Unable to parse corosync config"
    )

def qdevice_already_defined():
    """
    qdevice is already set up in a cluster, when it was expected not to be
    """
    return ReportItem.error(
        report_codes.QDEVICE_ALREADY_DEFINED,
        "quorum device is already defined"
    )

def qdevice_not_defined():
    """
    qdevice is not set up in a cluster, when it was expected to be
    """
    return ReportItem.error(
        report_codes.QDEVICE_NOT_DEFINED,
        "no quorum device is defined in this cluster"
    )

def cman_unsupported_command():
    """
    requested library command is not available as local cluster is CMAN based
    """
    return ReportItem.error(
        report_codes.CMAN_UNSUPPORTED_COMMAND,
        "This command is not supported on CMAN clusters"
    )

def id_already_exists(id):
    """
    specified id already exists in CIB and cannot be used for a new CIB object
    id string existing id
    """
    return ReportItem.error(
        report_codes.ID_ALREADY_EXISTS,
        "'{id}' already exists",
        info={"id": id}
    )
+
def id_not_found(id, id_description):
    """
    specified id does not exist in CIB, user referenced a nonexisting id
    use "resource_does_not_exist" if id is a resource id
    id string specified id
    id_description string describe id's role
    """
    # the description prefix is only rendered when a description was given
    return ReportItem.error(
        report_codes.ID_NOT_FOUND,
        ("{id_description} " if id_description else "") + "'{id}' does not exist",
        info={
            "id": id,
            "id_description": id_description,
        }
    )

def resource_does_not_exist(resource_id):
    """
    specified resource does not exist (e.g. when creating in constraints)
    resource_id string specified resource id
    """
    return ReportItem.error(
        report_codes.RESOURCE_DOES_NOT_EXIST,
        "Resource '{resource_id}' does not exist",
        info={
            "resource_id": resource_id,
        }
    )

def cib_load_error(retval, stdout):
    """
    cannot load cib from cibadmin, cibadmin exited with non-zero code
    retval external process's return (exit) code
    stdout string external process's stdout
    """
    return ReportItem.error(
        report_codes.CIB_LOAD_ERROR,
        "unable to get cib",
        info={
            "return_value": retval,
            "stdout": stdout,
        }
    )

def cib_load_error_scope_missing(scope, retval, stdout):
    """
    cannot load cib from cibadmin, specified scope is missing in the cib
    scope string requested cib scope
    retval external process's return (exit) code
    stdout string external process's stdout
    """
    return ReportItem.error(
        report_codes.CIB_LOAD_ERROR_SCOPE_MISSING,
        "unable to get cib, scope '{scope}' not present in cib",
        info={
            "scope": scope,
            "return_value": retval,
            "stdout": stdout,
        }
    )

def cib_load_error_invalid_format():
    """
    cib does not conform to the schema
    """
    return ReportItem.error(
        report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
        "unable to get cib, xml does not conform to the schema"
    )

def cib_missing_mandatory_section(section_name):
    """
    CIB is missing a section which is required to be present
    section_name string name of the missing section (element name or path)
    """
    return ReportItem.error(
        report_codes.CIB_CANNOT_FIND_MANDATORY_SECTION,
        "Unable to get {section} section of cib",
        info={
            "section": section_name,
        }
    )
+
def cib_push_error(retval, stdout):
    """
    cannot push cib to cibadmin, cibadmin exited with non-zero code
    retval external process's return (exit) code
    stdout string external process's stdout
    """
    return ReportItem.error(
        report_codes.CIB_PUSH_ERROR,
        "Unable to update cib\n{stdout}",
        info={
            "return_value": retval,
            "stdout": stdout,
        }
    )

def cluster_state_cannot_load(retval, stdout):
    """
    cannot load cluster status from crm_mon, crm_mon exited with non-zero code
    retval external process's return (exit) code
    stdout string external process's stdout
    """
    return ReportItem.error(
        report_codes.CRM_MON_ERROR,
        "error running crm_mon, is pacemaker running?",
        info={
            "return_value": retval,
            "stdout": stdout,
        }
    )

def cluster_state_invalid_format():
    """
    crm_mon xml output does not conform to the schema
    """
    return ReportItem.error(
        report_codes.BAD_CLUSTER_STATE_FORMAT,
        "cannot load cluster status, xml does not conform to the schema"
    )

def resource_wait_not_supported():
    """
    crm_resource does not support --wait
    """
    return ReportItem.error(
        report_codes.RESOURCE_WAIT_NOT_SUPPORTED,
        "crm_resource does not support --wait, please upgrade pacemaker"
    )

def resource_wait_timed_out(retval, stdout):
    """
    waiting for resources (crm_resource --wait) failed, timeout expired
    retval external process's return (exit) code
    stdout string external process's stdout
    """
    return ReportItem.error(
        report_codes.RESOURCE_WAIT_TIMED_OUT,
        "waiting timeout\n\n{stdout}",
        info={
            "return_value": retval,
            "stdout": stdout,
        }
    )

def resource_wait_error(retval, stdout):
    """
    waiting for resources (crm_resource --wait) failed
    retval external process's return (exit) code
    stdout string external process's stdout
    """
    return ReportItem.error(
        report_codes.RESOURCE_WAIT_ERROR,
        "{stdout}",
        info={
            "return_value": retval,
            "stdout": stdout,
        }
    )
+
def resource_cleanup_error(retval, stdout, resource=None, node=None):
    """
    an error occurred when deleting resource history in pacemaker
    retval external process's return (exit) code
    stdout string external process's stdout
    resource string resource which has been cleaned up
    node string node which has been cleaned up
    """
    # note: the "occured" misspelling below is part of the emitted message
    # and is kept as-is to avoid a behavior change
    if resource:
        text = "Unable to cleanup resource: {resource}\n{stdout}"
    else:
        text = (
            "Unexpected error occured. 'crm_resource -C' err_code: "
            + "{return_value}\n{stdout}"
        )
    return ReportItem.error(
        report_codes.RESOURCE_CLEANUP_ERROR,
        text,
        info={
            "return_value": retval,
            "stdout": stdout,
            "resource": resource,
            "node": node,
        }
    )

def resource_cleanup_too_time_consuming(threshold):
    """
    resource cleanup will execute more than threshold operations in a cluster
    threshold current threshold for triggering this error
    """
    return ReportItem.error(
        report_codes.RESOURCE_CLEANUP_TOO_TIME_CONSUMING,
        "Cleaning up all resources on all nodes will execute more "
            + "than {threshold} operations in the cluster, which may "
            + "negatively impact the responsiveness of the cluster. "
            + "Consider specifying resource and/or node"
        ,
        info={"threshold": threshold},
        forceable=report_codes.FORCE_LOAD_THRESHOLD
    )

def node_not_found(node):
    """
    specified node does not exist
    node string specified node
    """
    return ReportItem.error(
        report_codes.NODE_NOT_FOUND,
        "node '{node}' does not appear to exist in configuration",
        info={"node": node}
    )

def pacemaker_local_node_name_not_found(reason):
    """
    we are unable to figure out pacemaker's local node's name
    reason string error message
    """
    return ReportItem.error(
        report_codes.PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND,
        "unable to get local node name from pacemaker: {reason}",
        info={"reason": reason}
    )

def rrp_active_not_supported(warning=False):
    """
    active RRP mode is not supported, require user confirmation
    warning set to True if user confirmed he/she wants to proceed
    """
    # once confirmed (warning=True) the item is no longer forceable
    return ReportItem(
        report_codes.RRP_ACTIVE_NOT_SUPPORTED,
        ReportItemSeverity.WARNING if warning else ReportItemSeverity.ERROR,
        "using a RRP mode of 'active' is not supported or tested",
        forceable=(None if warning else report_codes.FORCE_ACTIVE_RRP)
    )
+
def cman_ignored_option(option):
    """
    specified option is ignored as CMAN clusters do not support it
    option string option name
    """
    return ReportItem.warning(
        report_codes.IGNORED_CMAN_UNSUPPORTED_OPTION,
        '{option_name} ignored as it is not supported on CMAN clusters',
        info={'option_name': option}
    )

def rrp_addresses_transport_mismatch():
    # TODO this knows too much about cmdline and needs to be fixed once
    # client code is moved to library, probably by CmdLineInputError in cli
    """
    RRP defined by network addresses is not allowed when udp transport is used
    """
    return ReportItem.error(
        report_codes.NON_UDP_TRANSPORT_ADDR_MISMATCH,
        "--addr0 and --addr1 can only be used with --transport=udp"
    )

def cman_udpu_restart_required():
    """
    warn user it is required to restart CMAN cluster for changes to take effect
    """
    return ReportItem.warning(
        report_codes.CMAN_UDPU_RESTART_REQUIRED,
        "Using udpu transport on a CMAN cluster, "
            + "cluster restart is required after node add or remove"
    )

def cman_broadcast_all_rings():
    """
    broadcast enabled in all rings, CMAN doesn't support 1 ring only broadcast
    """
    return ReportItem.warning(
        report_codes.CMAN_BROADCAST_ALL_RINGS,
        "Enabling broadcast for all rings as CMAN does not support "
            + "broadcast in only one ring"
    )
diff --git a/pcs/lib/resource_agent.py b/pcs/lib/resource_agent.py
new file mode 100644
index 0000000..a3c6650
--- /dev/null
+++ b/pcs/lib/resource_agent.py
@@ -0,0 +1,427 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os
+from lxml import etree
+
+from pcs import settings
+from pcs.common import report_codes
+from pcs.lib.errors import LibraryError
+from pcs.lib.errors import ReportItem
+from pcs.lib.pacemaker_values import is_true
+from pcs.lib.external import is_path_runnable
+from pcs.common.tools import simple_cache
+
+
+class UnsupportedResourceAgent(LibraryError):
+    pass
+
+
+class InvalidAgentName(LibraryError):
+    pass
+
+
+class AgentNotFound(LibraryError):
+    pass
+
+
+class UnableToGetAgentMetadata(LibraryError):
+    pass
+
+
+class InvalidMetadataFormat(LibraryError):
+    pass
+
+
+def __is_path_abs(path):
+    return path == os.path.abspath(path)
+
+
+def __get_text_from_dom_element(element):
+    if element is None or element.text is None:
+        return ""
+    else:
+        return element.text.strip()
+
+
+def __get_invalid_metadata_format_exception():
+    return InvalidMetadataFormat(ReportItem.error(
+        report_codes.INVALID_METADATA_FORMAT,
+        "invalid agent metadata format",
+        forceable=True
+    ))
+
+
+def _get_parameter(parameter_dom):
+    """
+    Returns dictionary that describes parameter.
+    dictionary format:
+    {
+        name: name of parameter
+        longdesc: long description,
+        shortdesc: short description,
+        type: data type of parameter,
+        default: default value,
+        required: True if is required parameter, False otherwise
+    }
+
+    parameter_dom -- parameter dom element
+    """
+    if parameter_dom.tag != "parameter" or parameter_dom.get("name") is None:
+        raise __get_invalid_metadata_format_exception()
+
+    longdesc = __get_text_from_dom_element(parameter_dom.find("longdesc"))
+    shortdesc = __get_text_from_dom_element(parameter_dom.find("shortdesc"))
+
+    content = parameter_dom.find("content")
+    if content is None:
+        val_type = "string"
+    else:
+        val_type = content.get("type", "string")
+
+    return {
+        "name": parameter_dom.get("name"),
+        "longdesc": longdesc,
+        "shortdesc": shortdesc,
+        "type": val_type,
+        "default": None if content is None else content.get("default"),
+        "required": is_true(parameter_dom.get("required", "0"))
+    }
+
+
+def _get_agent_parameters(metadata_dom):
+    """
+    Returns list of parameters from the agent's metadata
+
+    metadata_dom -- agent's metadata dom
+    """
+    if metadata_dom.tag != "resource-agent":
+        raise __get_invalid_metadata_format_exception()
+
+    params_el = metadata_dom.find("parameters")
+    if params_el is None:
+        return []
+
+    return [
+        _get_parameter(parameter) for parameter in params_el.iter("parameter")
+    ]
+
+
+def _get_pcmk_advanced_stonith_parameters(runner):
+    """Returns advanced instance attributes for stonith devices"""
+    @simple_cache
+    def __get_stonithd_parameters():
+        output, retval = runner.run(
+            [settings.stonithd_binary, "metadata"], ignore_stderr=True
+        )
+        if output.strip() == "":
+            raise UnableToGetAgentMetadata(ReportItem.error(
+                report_codes.UNABLE_TO_GET_AGENT_METADATA,
+                "unable to get metadata of stonithd",
+                info={"external_exitcode": retval, "external_output": output},
+                forceable=True
+            ))
+
+        try:
+            params = _get_agent_parameters(etree.fromstring(output))
+            for param in params:
+                param["longdesc"] = "{0}\n{1}".format(
+                    param["shortdesc"], param["longdesc"]
+                ).strip()
+                is_advanced = param["shortdesc"].startswith("Advanced use only")
+                param["advanced"] = is_advanced
+            return params
+        except etree.XMLSyntaxError:
+            raise __get_invalid_metadata_format_exception()
+
+    return __get_stonithd_parameters()
+
+
+def get_fence_agent_metadata(runner, fence_agent):
+    """
+    Returns dom of metadata for specified fence agent
+
+    fence_agent -- fence agent name, should start with 'fence_'
+    """
+
+    def __get_error(info):
+        return UnableToGetAgentMetadata(ReportItem.error(
+            report_codes.UNABLE_TO_GET_AGENT_METADATA,
+            "unable to get metadata of fence agent '{agent_name}'",
+            info=info,
+            forceable=True
+        ))
+
+    script_path = os.path.join(settings.fence_agent_binaries, fence_agent)
+
+    if not (
+        fence_agent.startswith("fence_") and
+        __is_path_abs(script_path) and
+        is_path_runnable(script_path)
+    ):
+        raise AgentNotFound(ReportItem.error(
+            report_codes.INVALID_RESOURCE_NAME,
+            "fence agent '{agent_name}' not found",
+            info={"agent_name": fence_agent},
+            forceable=True
+        ))
+
+    output, retval = runner.run(
+        [script_path, "-o", "metadata"], ignore_stderr=True
+    )
+
+    if output.strip() == "":
+        raise __get_error({
+            "agent_name": fence_agent,
+            "external_exitcode": retval,
+            "external_output": output
+        })
+
+    try:
+        return etree.fromstring(output)
+    except etree.XMLSyntaxError as e:
+        raise __get_error({
+            "agent_name": fence_agent,
+            "error_info": str(e)
+        })
+
+
+def _get_nagios_resource_agent_metadata(agent):
+    """
+    Returns metadata dom for specified nagios resource agent
+
+    agent -- name of nagios resource agent
+    """
+    agent_name = "nagios:" + agent
+    metadata_path = os.path.join(settings.nagios_metadata_path, agent + ".xml")
+
+    if not __is_path_abs(metadata_path):
+        raise AgentNotFound(ReportItem.error(
+            report_codes.INVALID_RESOURCE_NAME,
+            "resource agent '{agent_name}' not found",
+            info={"agent_name": agent_name},
+            forceable=True
+        ))
+
+    try:
+        return etree.parse(metadata_path).getroot()
+    except Exception as e:
+        raise UnableToGetAgentMetadata(ReportItem.error(
+            report_codes.UNABLE_TO_GET_AGENT_METADATA,
+            "unable to get metadata of resource agent '{agent_name}': " +
+            "{error_info}",
+            info={
+                "agent_name": agent_name,
+                "error_info": str(e)
+            },
+            forceable=True
+        ))
+
+
+def _get_ocf_resource_agent_metadata(runner, provider, agent):
+    """
+    Returns metadata dom for specified ocf resource agent
+
+    provider -- resource agent provider
+    agent -- resource agent name
+    """
+    agent_name = "ocf:" + provider + ":" + agent
+
+    def __get_error(info):
+        return UnableToGetAgentMetadata(ReportItem.error(
+            report_codes.UNABLE_TO_GET_AGENT_METADATA,
+            "unable to get metadata of resource agent '{agent_name}'",
+            info=info,
+            forceable=True
+        ))
+
+    script_path = os.path.join(settings.ocf_resources, provider, agent)
+
+    if not __is_path_abs(script_path) or not is_path_runnable(script_path):
+        raise AgentNotFound(ReportItem.error(
+            report_codes.INVALID_RESOURCE_NAME,
+            "resource agent '{agent_name}' not found",
+            info={"agent_name": agent_name},
+            forceable=True
+        ))
+
+    output, retval = runner.run(
+        [script_path, "meta-data"],
+        env_extend={"OCF_ROOT": settings.ocf_root},
+        ignore_stderr=True
+    )
+
+    if output.strip() == "":
+        raise __get_error({
+            "agent_name": agent_name,
+            "external_exitcode": retval,
+            "external_output": output
+        })
+
+    try:
+        return etree.fromstring(output)
+    except etree.XMLSyntaxError as e:
+        raise __get_error({
+            "agent_name": agent_name,
+            "error_info": str(e)
+        })
+
+
+def get_agent_desc(metadata_dom):
+    """
+    Returns dictionary which contains description of agent from its metadata.
+    dictionary format:
+    {
+        longdesc: long description
+        shortdesc: short description
+    }
+
+    metadata_dom -- metadata dom of agent
+    """
+    if metadata_dom.tag != "resource-agent":
+        raise __get_invalid_metadata_format_exception()
+
+    shortdesc_el = metadata_dom.find("shortdesc")
+    if shortdesc_el is None:
+        shortdesc = metadata_dom.get("shortdesc", "")
+    else:
+        shortdesc = shortdesc_el.text
+
+    return {
+        "longdesc": __get_text_from_dom_element(metadata_dom.find("longdesc")),
+        "shortdesc": "" if shortdesc is None else shortdesc.strip()
+    }
+
+
+def _filter_fence_agent_parameters(parameters):
+    """
+    Returns filtered list of fence agent parameters. It removes parameters
+    that user should not be setting.
+
+    parameters -- list of fence agent parameters
+    """
+    banned_parameters = ["debug", "action", "verbose", "version", "help"]
+    return [
+        param for param in parameters if param["name"] not in banned_parameters
+    ]
+
+
+def get_fence_agent_parameters(runner, metadata_dom):
+    """
+    Returns complete list of parameters for fence agent from its metadata.
+
+    metadata_dom -- metadata dom of fence agent
+    """
+    return (
+        _filter_fence_agent_parameters(_get_agent_parameters(metadata_dom)) +
+        _get_pcmk_advanced_stonith_parameters(runner)
+    )
+
+
+def get_resource_agent_parameters(metadata_dom):
+    """
+    Returns complete list of parameters for resource agent from its
+    metadata.
+
+    metadata_dom -- metadata dom of resource agent
+    """
+    return _get_agent_parameters(metadata_dom)
+
+
+def get_resource_agent_metadata(runner, agent):
+    """
+    Returns metadata of specified agent as dom
+
+    agent -- agent name
+    """
+    error = UnsupportedResourceAgent(ReportItem.error(
+        report_codes.UNSUPPORTED_RESOURCE_AGENT,
+        "resource agent '{agent}' is not supported",
+        info={"agent": agent},
+        forceable=True
+    ))
+    if agent.startswith("ocf:"):
+        agent_info = agent.split(":", 2)
+        if len(agent_info) != 3:
+            raise error
+        return _get_ocf_resource_agent_metadata(runner, *agent_info[1:])
+    elif agent.startswith("nagios:"):
+        return _get_nagios_resource_agent_metadata(agent.split("nagios:", 1)[1])
+    else:
+        raise error
+
+
+def _get_action(action_el):
+    """
+    Returns XML action element as a dictionary, where the element's
+    attributes become the keys of the dict
+
+    action_el -- action lxml.etree element
+    """
+    if action_el.tag != "action" or action_el.get("name") is None:
+        raise __get_invalid_metadata_format_exception()
+
+    return dict(action_el.items())
+
+
+def get_agent_actions(metadata_dom):
+    """
+    Returns list of actions from the agent's metadata
+
+    metadata_dom -- agent's metadata dom
+    """
+    if metadata_dom.tag != "resource-agent":
+        raise __get_invalid_metadata_format_exception()
+
+    actions_el = metadata_dom.find("actions")
+    if actions_el is None:
+        return []
+
+    return [
+        _get_action(action) for action in actions_el.iter("action")
+    ]
+
+
+def _validate_instance_attributes(agent_params, attrs):
+    valid_attrs = [attr["name"] for attr in agent_params]
+    required_missing = []
+
+    for attr in agent_params:
+        if attr["required"] and attr["name"] not in attrs:
+            required_missing.append(attr["name"])
+
+    return [attr for attr in attrs if attr not in valid_attrs], required_missing
+
+
+def validate_instance_attributes(runner, instance_attrs, agent):
+    """
+    Validates instance attributes according to specified agent.
+    Returns tuple of lists (<invalid attributes>, <missing required attributes>)
+
+    instance_attrs -- dictionary of instance attributes, where key is
+        attribute name and value is attribute value
+    agent -- full name (<class>:<agent> or <class>:<provider>:<agent>)
+        of resource/fence agent
+    """
+    if agent.startswith("stonith:"):
+        agent_params = get_fence_agent_parameters(
+            runner,
+            get_fence_agent_metadata(runner, agent.split("stonith:", 1)[1])
+        )
+        bad_attrs, missing_required = _validate_instance_attributes(
+            agent_params, instance_attrs
+        )
+        if "port" in missing_required:
+            # Temporarily make "port" an optional parameter. Once we are
+            # getting metadata from pacemaker, this will be reviewed and fixed.
+            missing_required.remove("port")
+        return bad_attrs, missing_required
+    else:
+        agent_params = get_resource_agent_parameters(
+            get_resource_agent_metadata(runner, agent)
+        )
+        return _validate_instance_attributes(agent_params, instance_attrs)
diff --git a/pcs/lib/test/__init__.py b/pcs/lib/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/lib/test/test_pacemaker_values.py b/pcs/lib/test/test_pacemaker_values.py
new file mode 100644
index 0000000..7979990
--- /dev/null
+++ b/pcs/lib/test/test_pacemaker_values.py
@@ -0,0 +1,252 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.test.tools.assertions import assert_raise_library_error
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severity
+
+import pcs.lib.pacemaker_values as lib
+
+
+class BooleanTest(TestCase):
+    def test_true_is_true(self):
+        self.assertTrue(lib.is_true("true"))
+        self.assertTrue(lib.is_true("tRue"))
+        self.assertTrue(lib.is_true("on"))
+        self.assertTrue(lib.is_true("ON"))
+        self.assertTrue(lib.is_true("yes"))
+        self.assertTrue(lib.is_true("yeS"))
+        self.assertTrue(lib.is_true("y"))
+        self.assertTrue(lib.is_true("Y"))
+        self.assertTrue(lib.is_true("1"))
+
+    def test_nontrue_is_not_true(self):
+        self.assertFalse(lib.is_true(""))
+        self.assertFalse(lib.is_true(" 1 "))
+        self.assertFalse(lib.is_true("a"))
+        self.assertFalse(lib.is_true("2"))
+        self.assertFalse(lib.is_true("10"))
+        self.assertFalse(lib.is_true("yes please"))
+
+    def test_true_is_boolean(self):
+        self.assertTrue(lib.is_boolean("true"))
+        self.assertTrue(lib.is_boolean("tRue"))
+        self.assertTrue(lib.is_boolean("on"))
+        self.assertTrue(lib.is_boolean("ON"))
+        self.assertTrue(lib.is_boolean("yes"))
+        self.assertTrue(lib.is_boolean("yeS"))
+        self.assertTrue(lib.is_boolean("y"))
+        self.assertTrue(lib.is_boolean("Y"))
+        self.assertTrue(lib.is_boolean("1"))
+
+    def test_false_is_boolean(self):
+        self.assertTrue(lib.is_boolean("false"))
+        self.assertTrue(lib.is_boolean("fAlse"))
+        self.assertTrue(lib.is_boolean("off"))
+        self.assertTrue(lib.is_boolean("oFf"))
+        self.assertTrue(lib.is_boolean("no"))
+        self.assertTrue(lib.is_boolean("nO"))
+        self.assertTrue(lib.is_boolean("n"))
+        self.assertTrue(lib.is_boolean("N"))
+        self.assertTrue(lib.is_boolean("0"))
+
+    def test_nonboolean_is_not_boolean(self):
+        self.assertFalse(lib.is_boolean(""))
+        self.assertFalse(lib.is_boolean("a"))
+        self.assertFalse(lib.is_boolean("2"))
+        self.assertFalse(lib.is_boolean("10"))
+        self.assertFalse(lib.is_boolean("yes please"))
+        self.assertFalse(lib.is_boolean(" y"))
+        self.assertFalse(lib.is_boolean("n "))
+        self.assertFalse(lib.is_boolean("NO!"))
+
+
+class TimeoutTest(TestCase):
+    def test_valid(self):
+        self.assertEqual(10, lib.timeout_to_seconds("10"))
+        self.assertEqual(10, lib.timeout_to_seconds("10s"))
+        self.assertEqual(10, lib.timeout_to_seconds("10sec"))
+        self.assertEqual(600, lib.timeout_to_seconds("10m"))
+        self.assertEqual(600, lib.timeout_to_seconds("10min"))
+        self.assertEqual(36000, lib.timeout_to_seconds("10h"))
+        self.assertEqual(36000, lib.timeout_to_seconds("10hr"))
+
+    def test_invalid(self):
+        self.assertEqual(None, lib.timeout_to_seconds("1a1s"))
+        self.assertEqual(None, lib.timeout_to_seconds("10mm"))
+        self.assertEqual(None, lib.timeout_to_seconds("10mim"))
+        self.assertEqual(None, lib.timeout_to_seconds("aaa"))
+        self.assertEqual(None, lib.timeout_to_seconds(""))
+
+        self.assertEqual("1a1s", lib.timeout_to_seconds("1a1s", True))
+        self.assertEqual("10mm", lib.timeout_to_seconds("10mm", True))
+        self.assertEqual("10mim", lib.timeout_to_seconds("10mim", True))
+        self.assertEqual("aaa", lib.timeout_to_seconds("aaa", True))
+        self.assertEqual("", lib.timeout_to_seconds("", True))
+
+
+class ValidateIdTest(TestCase):
+    def test_valid(self):
+        self.assertEqual(None, lib.validate_id("dummy"))
+        self.assertEqual(None, lib.validate_id("DUMMY"))
+        self.assertEqual(None, lib.validate_id("dUmMy"))
+        self.assertEqual(None, lib.validate_id("dummy0"))
+        self.assertEqual(None, lib.validate_id("dum0my"))
+        self.assertEqual(None, lib.validate_id("dummy-"))
+        self.assertEqual(None, lib.validate_id("dum-my"))
+        self.assertEqual(None, lib.validate_id("dummy."))
+        self.assertEqual(None, lib.validate_id("dum.my"))
+        self.assertEqual(None, lib.validate_id("_dummy"))
+        self.assertEqual(None, lib.validate_id("dummy_"))
+        self.assertEqual(None, lib.validate_id("dum_my"))
+
+    def test_invalid_empty(self):
+        assert_raise_library_error(
+            lambda: lib.validate_id("", "test id"),
+            (
+                severity.ERROR,
+                report_codes.INVALID_ID,
+                {
+                    "id": "",
+                    "id_description": "test id",
+                    "reason": "empty",
+                }
+            )
+        )
+
+    def test_invalid_first_character(self):
+        desc = "test id"
+        info = {
+            "id": "",
+            "id_description": desc,
+            "reason": "invalid first character",
+            "invalid_character": "",
+        }
+        report = (severity.ERROR, report_codes.INVALID_ID, info)
+
+        info["id"] = "0"
+        info["invalid_character"] = "0"
+        assert_raise_library_error(
+            lambda: lib.validate_id("0", desc),
+            report
+        )
+
+        info["id"] = "-"
+        info["invalid_character"] = "-"
+        assert_raise_library_error(
+            lambda: lib.validate_id("-", desc),
+            report
+        )
+
+        info["id"] = "."
+        info["invalid_character"] = "."
+        assert_raise_library_error(
+            lambda: lib.validate_id(".", desc),
+            report
+        )
+
+        info["id"] = ":"
+        info["invalid_character"] = ":"
+        assert_raise_library_error(
+            lambda: lib.validate_id(":", desc),
+            report
+        )
+
+        info["id"] = "0dummy"
+        info["invalid_character"] = "0"
+        assert_raise_library_error(
+            lambda: lib.validate_id("0dummy", desc),
+            report
+        )
+
+        info["id"] = "-dummy"
+        info["invalid_character"] = "-"
+        assert_raise_library_error(
+            lambda: lib.validate_id("-dummy", desc),
+            report
+        )
+
+        info["id"] = ".dummy"
+        info["invalid_character"] = "."
+        assert_raise_library_error(
+            lambda: lib.validate_id(".dummy", desc),
+            report
+        )
+
+        info["id"] = ":dummy"
+        info["invalid_character"] = ":"
+        assert_raise_library_error(
+            lambda: lib.validate_id(":dummy", desc),
+            report
+        )
+
+    def test_invalid_character(self):
+        desc = "test id"
+        info = {
+            "id": "",
+            "id_description": desc,
+            "reason": "invalid character",
+            "invalid_character": "",
+        }
+        report = (severity.ERROR, report_codes.INVALID_ID, info)
+
+        info["id"] = "dum:my"
+        info["invalid_character"] = ":"
+        assert_raise_library_error(
+            lambda: lib.validate_id("dum:my", desc),
+            report
+        )
+
+        info["id"] = "dummy:"
+        info["invalid_character"] = ":"
+        assert_raise_library_error(
+            lambda: lib.validate_id("dummy:", desc),
+            report
+        )
+
+        info["id"] = "dum?my"
+        info["invalid_character"] = "?"
+        assert_raise_library_error(
+            lambda: lib.validate_id("dum?my", desc),
+            report
+        )
+
+        info["id"] = "dummy?"
+        info["invalid_character"] = "?"
+        assert_raise_library_error(
+            lambda: lib.validate_id("dummy?", desc),
+            report
+        )
+
+
+class IsScoreValueTest(TestCase):
+    def test_returns_true_for_number(self):
+        self.assertTrue(lib.is_score_value("1"))
+
+    def test_returns_true_for_minus_number(self):
+        self.assertTrue(lib.is_score_value("-1"))
+
+    def test_returns_true_for_plus_number(self):
+        self.assertTrue(lib.is_score_value("+1"))
+
+    def test_returns_true_for_infinity(self):
+        self.assertTrue(lib.is_score_value("INFINITY"))
+
+    def test_returns_true_for_minus_infinity(self):
+        self.assertTrue(lib.is_score_value("-INFINITY"))
+
+    def test_returns_true_for_plus_infinity(self):
+        self.assertTrue(lib.is_score_value("+INFINITY"))
+
+    def test_returns_false_for_nonumber_noinfinity(self):
+        self.assertFalse(lib.is_score_value("something else"))
+
+    def test_returns_false_for_multiple_operators(self):
+        self.assertFalse(lib.is_score_value("++INFINITY"))
diff --git a/pcs/library_acl.py b/pcs/library_acl.py
deleted file mode 100644
index 36de4ec..0000000
--- a/pcs/library_acl.py
+++ /dev/null
@@ -1,135 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import utils
-from errors import ReportItem
-from errors import ReportItemSeverity
-from errors import error_codes
-
-class LibraryError(Exception):
-    pass
-
-class AclRoleNotFound(LibraryError):
-    pass
-
-def __validate_role_id_for_create(dom, role_id):
-    id_valid, message = utils.validate_xml_id(role_id, 'ACL role')
-    if not id_valid:
-        raise LibraryError(ReportItem.error(
-            error_codes.ID_IS_NOT_VALID,
-            message,
-            info={'id': role_id}
-        ))
-    if utils.dom_get_element_with_id(dom, "acl_role", role_id):
-        raise LibraryError(ReportItem.error(
-            error_codes.ACL_ROLE_ALREADY_EXISTS,
-            'role {id} already exists',
-            info={'id': role_id}
-        ))
-    if utils.does_id_exist(dom, role_id):
-        raise LibraryError(ReportItem.error(
-            error_codes.ID_ALREADY_EXISTS,
-            '{id} already exists',
-            info={'id': role_id}
-        ))
-
-def __validate_permissions(dom, permission_info_list):
-    report = []
-    allowed_permissions = ["read", "write", "deny"]
-    allowed_scopes = ["xpath", "id"]
-    for permission, scope_type, scope in permission_info_list:
-        if not permission in allowed_permissions:
-            report.append(ReportItem.error(
-                error_codes.BAD_ACL_PERMISSION,
-                'bad permission "{permission}, expected {allowed_values}',
-                info={
-                    'permission': permission,
-                    'allowed_values_raw': allowed_permissions,
-                    'allowed_values': ' or '.join(allowed_permissions)
-                },
-            ))
-
-        if not scope_type in allowed_scopes:
-            report.append(ReportItem.error(
-                error_codes.BAD_ACL_SCOPE_TYPE,
-                'bad scope type "{scope_type}, expected {allowed_values}',
-                info={
-                    'scope_type': scope_type,
-                    'allowed_values_raw': allowed_scopes,
-                    'allowed_values': ' or '.join(allowed_scopes)
-                },
-            ))
-
-        if scope_type == 'id' and not utils.does_id_exist(dom, scope):
-            report.append(ReportItem.error(
-                error_codes.ID_NOT_FOUND,
-                'id "{id}" does not exist.',
-                info={'id': scope },
-            ))
-
-    if report:
-        raise LibraryError(*report)
-
-def __find_role(dom, role_id):
-    for role in dom.getElementsByTagName("acl_role"):
-        if role.getAttribute("id") == role_id:
-            return role
-
-    raise AclRoleNotFound(ReportItem.error(
-        error_codes.ACL_ROLE_NOT_FOUND,
-        'role id "{role_id}" does not exist.',
-        info={'role_id': role_id},
-    ))
-
-def create_role(dom, role_id, description=''):
-    """
-    role_id id of desired role
-    description role description
-    """
-    __validate_role_id_for_create(dom, role_id)
-    role = dom.createElement("acl_role")
-    role.setAttribute("id",role_id)
-    if description != "":
-        role.setAttribute("description", description)
-    acls = utils.get_acls(dom)
-    acls.appendChild(role)
-
-def provide_role(dom, role_id):
-    """
-    role_id id of desired role
-    description role description
-    """
-    try:
-        __find_role(dom, role_id)
-    except AclRoleNotFound:
-        create_role(dom, role_id)
-
-def add_permissions_to_role(dom, role_id, permission_info_list):
-    """
-    dom document node
-    role_id value of atribute id, which exists in dom
-    permission_info_list list of tuples,
-        each contains (permission, scope_type, scope)
-    """
-    __validate_permissions(dom, permission_info_list)
-
-    area_type_attribute_map = {
-        'xpath': 'xpath',
-        'id': 'reference',
-    }
-    for permission, scope_type, scope in permission_info_list:
-        se = dom.createElement("acl_permission")
-        se.setAttribute(
-            "id",
-            utils.find_unique_id(dom, role_id + "-" + permission)
-        )
-        se.setAttribute("kind", permission)
-        se.setAttribute(area_type_attribute_map[scope_type], scope)
-        __find_role(dom, role_id).appendChild(se)
-
-def remove_permissions_referencing(dom, reference):
-    for permission in dom.getElementsByTagName("acl_permission"):
-        if permission.getAttribute("reference") == reference:
-            permission.parentNode.removeChild(permission)
diff --git a/pcs/node.py b/pcs/node.py
index 06396f6..ac154d4 100644
--- a/pcs/node.py
+++ b/pcs/node.py
@@ -1,12 +1,20 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import sys
+import json
 
-import usage
-import utils
+from pcs import (
+    usage,
+    utils,
+)
+from pcs.lib.errors import LibraryError
+import pcs.lib.pacemaker as lib_pacemaker
+from pcs.lib.pacemaker_values import get_valid_timeout_seconds
 
 
 def node_cmd(argv):
@@ -21,6 +29,10 @@ def node_cmd(argv):
         node_maintenance(argv)
     elif sub_cmd == "unmaintenance":
         node_maintenance(argv, False)
+    elif sub_cmd == "standby":
+        node_standby(argv)
+    elif sub_cmd == "unstandby":
+        node_standby(argv, False)
     elif sub_cmd == "utilization":
         if len(argv) == 0:
             print_nodes_utilization()
@@ -28,6 +40,9 @@ def node_cmd(argv):
             print_node_utilization(argv.pop(0))
         else:
             set_node_utilization(argv.pop(0), argv)
+    # pcs-to-pcsd use only
+    elif sub_cmd == "pacemaker-status":
+        node_pacemaker_status()
     else:
         usage.node()
         sys.exit(1)
@@ -79,6 +94,36 @@ def node_maintenance(argv, on=True):
     if failed_count > 0:
         sys.exit(1)
 
+def node_standby(argv, standby=True):
+    if (len(argv) > 1) or (len(argv) > 0 and "--all" in utils.pcs_options):
+        usage.node(["standby" if standby else "unstandby"])
+        sys.exit(1)
+
+    all_nodes = "--all" in utils.pcs_options
+    node_list = [argv[0]] if argv else []
+    wait = False
+    timeout = None
+    if "--wait" in utils.pcs_options:
+        wait = True
+        timeout = utils.pcs_options["--wait"]
+
+    try:
+        if wait:
+            lib_pacemaker.ensure_resource_wait_support(utils.cmd_runner())
+            valid_timeout = get_valid_timeout_seconds(timeout)
+        if standby:
+            lib_pacemaker.nodes_standby(
+                utils.cmd_runner(), node_list, all_nodes
+            )
+        else:
+            lib_pacemaker.nodes_unstandby(
+                utils.cmd_runner(), node_list, all_nodes
+            )
+        if wait:
+            lib_pacemaker.wait_for_resources(utils.cmd_runner(), valid_timeout)
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
+
 def set_node_utilization(node, argv):
     cib = utils.get_cib_dom()
     node_el = utils.dom_get_node(cib, node)
@@ -106,7 +151,15 @@ def print_nodes_utilization():
     for node_el in cib.getElementsByTagName("node"):
         u = utils.get_utilization_str(node_el)
         if u:
-           utilization[node_el.getAttribute("uname")] = u
+            utilization[node_el.getAttribute("uname")] = u
     print("Node Utilization:")
     for node in sorted(utilization):
         print(" {0}: {1}".format(node, utilization[node]))
+
+def node_pacemaker_status():
+    try:
+        print(json.dumps(
+            lib_pacemaker.get_local_node_status(utils.cmd_runner())
+        ))
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
diff --git a/pcs/pcs b/pcs/pcs
deleted file mode 120000
index c24fac1..0000000
--- a/pcs/pcs
+++ /dev/null
@@ -1 +0,0 @@
-pcs.py
\ No newline at end of file
diff --git a/pcs/pcs b/pcs/pcs
new file mode 100755
index 0000000..4585fd5
--- /dev/null
+++ b/pcs/pcs
@@ -0,0 +1,12 @@
+#!/usr/bin/env python
+
+from __future__ import absolute_import
+import os.path
+import sys
+
+PACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.insert(0, PACKAGE_DIR)
+
+from pcs import app
+
+app.main(sys.argv[1:])
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index 86ecdd9..ac0717f 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "February 2016" "pcs 0.9.149" "System Administration Utilities"
+.TH PCS "8" "May 2016" "pcs 0.9.151" "System Administration Utilities"
 .SH NAME
 pcs \- pacemaker/corosync configuration system
 .SH SYNOPSIS
@@ -39,6 +39,9 @@ Set pacemaker properties
 acl
 Set pacemaker access control lists
 .TP
+quorum
+Manage cluster quorum settings
+.TP
 status
 View cluster status
 .TP
@@ -52,7 +55,7 @@ node
 Manage cluster nodes
 .SS "resource"
 .TP
-show [resource id] [\fB\-\-full\fR] [\fB\-\-groups\fR]
+[show [resource id]] [\fB\-\-full\fR] [\fB\-\-groups\fR]
 Show all currently configured resources or if a resource is specified show the options for the configured resource.  If \fB\-\-full\fR is specified all configured resource options will be displayed.  If \fB\-\-groups\fR is specified, only show groups (and their resources).
 .TP
 list [<standard|provider|type>] [\fB\-\-nodesc\fR]
@@ -156,8 +159,8 @@ Set resources listed to unmanaged mode
 defaults [options]
 Set default values for resources, if no options are passed, lists currently configured defaults
 .TP
-cleanup [<resource id>]
-Cleans up the resource in the lrmd (useful to reset the resource status and failcount). This tells the cluster to forget the operation history of a resource and re-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a resource id is not specified then all resources/stonith devices will be cleaned up.
+cleanup [<resource id>] [\fB\-\-node\fR <node>]
+Cleans up the resource in the lrmd (useful to reset the resource status and failcount).  This tells the cluster to forget the operation history of a resource and re-detect its current state.  This can be useful to purge knowledge of past failures that have since been resolved.  If a resource id is not specified then all resources/stonith devices will be cleaned up.  If a node is not specified then resources on all nodes will be cleaned up.
 .TP
 failcount show <resource id> [node]
 Show current failcount for specified resource from all nodes or only on specified node
@@ -184,8 +187,8 @@ Add specified utilization options to specified resource. If resource is not spec
 auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-force\fR] [\fB\-\-local\fR]
 Authenticate pcs to pcsd on nodes specified, or on all nodes configured in corosync.conf if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root).  By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other).  Using \fB\-\-force\fR forces re-authentication to occur.
 .TP
-setup [\fB\-\-start\fR] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1-altaddr]> [node2[,node2-altaddr]] [..] [\fB\-\-transport\fR <udpu|udp>] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\fB\-\-broadcast1\fR]]]]  [...]
-Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for CMAN clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the system. Currently only 'passive' is supported or tested (using 'active'  [...]
+setup [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1-altaddr]> [<node2[,node2-altaddr]>] [...] [\fB\-\-transport\fR udpu|udp] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\ [...]
+Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-wait\fR will wait up to 'n' seconds for the nodes to start, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for CMAN clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the syste [...]
 
 \fB\-\-ipv6\fR will configure corosync to use ipv6 (instead of ipv4)
 
@@ -217,8 +220,8 @@ be used around the cluster.  \fB\-\-mcast0\fR defaults to 239.255.1.1 and
 ttl defaults to 1. If \fB\-\-broadcast\fR is specified, \fB\-\-mcast0/1\fR,
 \fB\-\-mcastport0/1\fR & \fB\-\-ttl0/1\fR are ignored.
 .TP
-start [\fB\-\-all\fR] [node] [...]
-Start corosync & pacemaker on specified node(s), if a node is not specified then corosync & pacemaker are started on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are started on all nodes.
+start [\fB\-\-all\fR] [node] [...] [\fB\-\-wait\fR[=<n>]]
+Start corosync & pacemaker on specified node(s), if a node is not specified then corosync & pacemaker are started on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are started on all nodes. If \fB\-\-wait\fR is specified, wait up to 'n' seconds for nodes to start.
 .TP
 stop [\fB\-\-all\fR] [node] [...]
 Stop corosync & pacemaker on specified node(s), if a node is not specified then corosync & pacemaker are stopped on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are stopped on all nodes.
@@ -232,12 +235,6 @@ Configure corosync & pacemaker to run on node boot on specified node(s), if node
 disable [\fB\-\-all\fR] [node] [...]
 Configure corosync & pacemaker to not run on node boot on specified node(s), if node is not specified then corosync & pacemaker are disabled on the local node. If \fB\-\-all\fR is specified then corosync & pacemaker are disabled on all nodes. (Note: this is the default after installation)
 .TP
-standby [<node>] | \fB\-\-all\fR
-Put specified node into standby mode (the node specified will no longer be able to host resources), if no node or options are specified the current node will be put into standby mode, if \fB\-\-all\fR is specified all nodes will be put into standby mode.
-.TP
-unstandby [<node>] | \fB\-\-all\fR
-Remove node from standby mode (the node specified will now be able to host resources), if no node or options are specified the current node will be removed from standby mode, if \fB\-\-all\fR is specified all nodes will be removed from standby mode.
-.TP
 remote-node add <hostname> <resource id> [options]
 Enables the specified resource as a remote-node resource on the specified hostname (hostname should be the same as 'uname -n')
 .TP
@@ -268,8 +265,8 @@ Upgrade the CIB to conform to the latest version of the document schema
 edit [scope=<scope> | \fB\-\-config\fR]
 Edit the cib in the editor specified by the $EDITOR environment variable and push out any changes upon saving.  Specify scope to edit a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to edit the whole CIB or be warned in the case of outdated CIB.
 .TP
-node add <node[,node\-altaddr]> [\fB\-\-start\fR] [\fB\-\-enable\fR]
-Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node.  If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-enable\fR is specified enable corosync/pacemaker on new node.  When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 address.
+node add <node[,node\-altaddr]> [\fB\-\-start\fR [\fB\-\-wait\fR[=<n>]]] [\fB\-\-enable\fR]
+Add the node to corosync.conf and corosync on all nodes in the cluster and sync the new corosync.conf to the new node.  If \fB\-\-start\fR is specified also start corosync/pacemaker on the new node, if \fB\-\-wait\fR is specified wait up to 'n' seconds for the new node to start.  If \fB\-\-enable\fR is specified enable corosync/pacemaker on new node.  When using Redundant Ring Protocol (RRP) with udpu transport, specify the ring 0 address first followed by a ',' and then the ring 1 address.
 .TP
 node remove <node>
 Shutdown specified node and remove it from pacemaker and corosync on all other nodes in the cluster
@@ -299,7 +296,7 @@ report [\fB\-\-from\fR "YYYY\-M\-D H:M:S" [\fB\-\-to\fR "YYYY\-M\-D" H:M:S"]] de
 Create a tarball containing everything needed when reporting cluster problems.  If \fB\-\-from\fR and \fB\-\-to\fR are not used, the report will include the past 24 hours.
 .SS "stonith"
 .TP
-show [stonith id] [\fB\-\-full\fR]
+[show [stonith id]] [\fB\-\-full\fR]
 Show all currently configured stonith devices or if a stonith id is specified show the options for the configured stonith device.  If \fB\-\-full\fR is specified all configured stonith options will be displayed
 .TP
 list [filter] [\fB\-\-nodesc\fR]
@@ -317,8 +314,8 @@ Add/Change options to specified stonith id
 delete <stonith id>
 Remove stonith id from configuration
 .TP
-cleanup [<stonith id>]
-Cleans up the stonith device in the lrmd (useful to reset the status and failcount).  This tells the cluster to forget the operation history of a stonith device and re-detect its current state.  This can be useful to purge knowledge of past failures that have since been resolved. If a stonith id is not specified then all resources/stonith devices will be cleaned up.
+cleanup [<stonith id>] [\fB\-\-node\fR <node>]
+Cleans up the stonith device in the lrmd (useful to reset the status and failcount).  This tells the cluster to forget the operation history of a stonith device and re-detect its current state.  This can be useful to purge knowledge of past failures that have since been resolved.  If a stonith id is not specified then all resources/stonith devices will be cleaned up.  If a node is not specified then resources on all nodes will be cleaned up.
 .TP
 level
 Lists all of the fencing levels currently configured
@@ -387,7 +384,7 @@ permission delete <permission id>
 Remove the permission id specified (permission id's are listed in parenthesis after permissions in 'pcs acl' output)
 .SS "property"
 .TP
-list|show [<property> | \fB\-\-all\fR | \fB\-\-defaults\fR]
+[list|show [<property> | \fB\-\-all\fR | \fB\-\-defaults\fR]] | [\fB\-\-all\fR | \fB\-\-defaults\fR]
 List property settings (default: lists configured properties).  If \fB\-\-defaults\fR is specified will show all property defaults, if \fB\-\-all\fR is specified, current configured properties will be shown with unset properties and their defaults.  Run 'man pengine' and 'man crmd' to get a description of the properties.
 .TP
 set [\fB\-\-force\fR | \fB\-\-node\fR <nodename>] <property>=[<value>] [<property>=[<value>] ...]
@@ -406,7 +403,7 @@ Create a location constraint on a resource to prefer the specified node and scor
 location <resource id> avoids <node[=score]>...
 Create a location constraint on a resource to avoid the specified node and score (default score: INFINITY)
 .TP
-location <resource id> rule [id=<rule id>] [resource-discovery=<option>] [role=master|slave] [constraint-id=<id>] [score=<score>|score-attribute=<attribute>] <expression>
+location <resource id> rule [id=<rule id>] [resource-discovery=<option>] [role=master|slave] [constraint\-id=<id>] [score=<score>|score-attribute=<attribute>] <expression>
 Creates a location rule on the specified resource where the expression looks like one of the following:
 .br
   defined|not_defined <attribute>
@@ -440,10 +437,10 @@ order show [\fB\-\-full\fR]
 List all current ordering constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
 .TP
 order [action] <resource id> then [action] <resource id> [options]
-Add an ordering constraint specifying actions (start, stop, promote, demote) and if no action is specified the default action will be start.  Available options are kind=Optional/Mandatory/Serialize, symmetrical=true/false, require-all=true/false and id=<constraint-id>.
+Add an ordering constraint specifying actions (start, stop, promote, demote) and if no action is specified the default action will be start.  Available options are kind=Optional/Mandatory/Serialize, symmetrical=true/false, require-all=true/false and id=<constraint\-id>.
 .TP
-order set <resource1> <resource2> [resourceN]... [options] [set <resourceX> <resourceY> ... [options]] [setoptions [constraint_options]]
-Create an ordered set of resources. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.  Available constraint_options are id=<constraint-id>, kind=Optional/Mandatory/Serialize and symmetrical=true/false.
+order set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
+Create an ordered set of resources. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.  Available constraint_options are id=<constraint\-id>, kind=Optional/Mandatory/Serialize and symmetrical=true/false.
 .TP
 order remove <resource1> [resourceN]...
 Remove resource from any ordering constraint
@@ -451,15 +448,24 @@ Remove resource from any ordering constraint
 colocation show [\fB\-\-full\fR]
 List all current colocation constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
 .TP
-colocation add [master|slave] <source resource id> with [master|slave] <target resource id> [score] [options] [id=constraint-id]
+colocation add [master|slave] <source resource id> with [master|slave] <target resource id> [score] [options] [id=constraint\-id]
 Request <source resource> to run on the same node where pacemaker has determined <target resource> should run.  Positive values of score mean the resources should be run on the same node, negative values mean the resources should not be run on the same node.  Specifying 'INFINITY' (or '\-INFINITY') for the score forces <source resource> to run (or not run) with <target resource> (score defaults to "INFINITY").  A role can be master or slave (if no role is specified, it defaults to 'started').
 .TP
-colocation set <resource1> <resource2> [resourceN]... [options] [set <resourceX> <resourceY> ... [options]] [setoptions [constraint_options]]
+colocation set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
 Create a colocation constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Available constraint_options are id, score, score-attribute and score-attribute-mangle.
 .TP
 colocation remove <source resource id> <target resource id>
 Remove colocation constraints with <source resource>
 .TP
+ticket show [\fB\-\-full\fR]
+List all current ticket constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
+.TP
+ticket set <resource1> [resourceN]... [options] [set <resourceX> ... [options]] [setoptions [constraint_options]]
+Create a ticket constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Required constraint option is ticket. Available constraint option is loss-policy=fence/stop/freeze/demote.
+.TP
+ticket add <ticket> [<role>] <resource id> [options] [id=constraint\-id]
+Create a ticket constraint for <resource id>. Available option is loss-policy=fence/stop/freeze/demote. A role can be master, slave, started or stopped.
+.TP
 remove [constraint id]...
 Remove constraint(s) or constraint rules with the specified id(s)
 .TP
@@ -489,6 +495,22 @@ where duration options and date spec options are: hours, monthdays, weekdays, ye
 .TP
 rule remove <rule id>
 Remove a rule if a rule id is specified, if rule is last rule in its constraint, the constraint will be removed
+.SS "quorum"
+.TP
+config
+Show quorum configuration.
+.TP
+device add [generic options] model <device model> [model options]
+Add quorum device to cluster.
+.TP
+device remove
+Remove quorum device from cluster.
+.TP
+device update [generic options] [model <model options>]
+Add/Change quorum device options.
+.TP
+update [auto_tie_breaker=[0|1]] [last_man_standing=[0|1]] [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]]
+Add/Change quorum options.  At least one option must be specified.  Options are documented in corosync's votequorum(5) man page.
 .SS "status"
 .TP
 [status] [\fB\-\-full\fR | \fB\-\-hide-inactive\fR]
@@ -554,12 +576,18 @@ clear-auth [\fB\-\-local\fR] [\fB\-\-remote\fR]
 Removes all system tokens which allow pcs/pcsd on the current system to authenticate with remote pcs/pcsd instances and vice\-versa.  After this command is run this node will need to be re\-authenticated with other nodes (using 'pcs cluster auth').  Using \fB\-\-local\fR only removes tokens used by local pcs (and pcsd if root) to connect to other pcsd instances, using \fB\-\-remote\fR clears authentication tokens used by remote systems to connect to the local pcsd instance.
 .SS "node"
 .TP
-maintenance [\fB\-\-all\fR] | [node]...
+maintenance [\fB\-\-all\fR] | [<node>]...
 Put specified node(s) into maintenance mode, if no node or options are specified the current node will be put into maintenance mode, if \fB\-\-all\fR is specified all nodes will be put into maintenance mode.
 .TP
-unmaintenance [\fB\-\-all\fR] | [node]...
+unmaintenance [\fB\-\-all\fR] | [<node>]...
 Remove node(s) from maintenance mode, if no node or options are specified the current node will be removed from maintenance mode, if \fB\-\-all\fR is specified all nodes will be removed from maintenance mode.
 .TP
+standby [\fB\-\-all\fR | <node>] [\fB\-\-wait\fR[=n]]
+Put specified node into standby mode (the node specified will no longer be able to host resources), if no node or options are specified the current node will be put into standby mode, if \fB\-\-all\fR is specified all nodes will be put into standby mode.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the node(s) to be put into standby mode and then return 0 on success or 1 if the operation has not succeeded yet.  If 'n' is not specified it defaults to 60 minutes.
+.TP
+unstandby [\fB\-\-all\fR | <node>] [\fB\-\-wait\fR[=n]]
+Remove node from standby mode (the node specified will now be able to host resources), if no node or options are specified the current node will be removed from standby mode, if \fB\-\-all\fR is specified all nodes will be removed from standby mode.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the node(s) to be removed from standby mode and then return 0 on success or 1 if the operation has not succeeded yet.  If 'n' is not specified it defaults to 60 minutes.
+.TP
 utilization [<node> [<name>=<value> ...]]
 Add specified utilization options to specified node. If node is not specified, shows utilization of all nodes. If utilization options are not specified, shows utilization of specified node. Utilization option should be in format name=value, value has to be integer. Options may be removed by setting an option without a value. Example: pcs node utilization node1 cpu=4 ram=
 .SH EXAMPLES
diff --git a/pcs/pcsd.py b/pcs/pcsd.py
index 1a83a03..925ce29 100644
--- a/pcs/pcsd.py
+++ b/pcs/pcsd.py
@@ -1,16 +1,17 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import sys
 import os
 import errno
-import json
 
-import usage
-import utils
-import settings
+from pcs import usage
+from pcs import utils
+from pcs import settings
 
 
 def pcsd_cmd(argv):
@@ -52,7 +53,7 @@ def pcsd_certkey(argv):
             utils.err(err, False)
         sys.exit(1)
 
-    if not "--force" in utils.pcs_options and (os.path.exists(settings.pcsd_cert_location) or os.path.exists(settings.pcsd_key_location)):
+    if "--force" not in utils.pcs_options and (os.path.exists(settings.pcsd_cert_location) or os.path.exists(settings.pcsd_key_location)):
         utils.err("certificate and/or key already exists, your must use --force to overwrite")
 
     try:
diff --git a/pcs/prop.py b/pcs/prop.py
index 8a60611..3a65990 100644
--- a/pcs/prop.py
+++ b/pcs/prop.py
@@ -1,14 +1,16 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import sys
 import json
 from xml.dom.minidom import parseString
 
-import usage
-import utils
+from pcs import usage
+from pcs import utils
 
 def property_cmd(argv):
     if len(argv) == 0:
@@ -103,6 +105,9 @@ def list_property(argv):
     if len(argv) == 0:
         print_all = True
 
+    if "--all" in utils.pcs_options and "--defaults" in utils.pcs_options:
+        utils.err("you cannot specify both --all and --defaults")
+
     if "--all" in utils.pcs_options or "--defaults" in utils.pcs_options:
         if len(argv) != 0:
             utils.err("you cannot specify a property when using --all or --defaults")
diff --git a/pcs/quorum.py b/pcs/quorum.py
new file mode 100644
index 0000000..a63a0b4
--- /dev/null
+++ b/pcs/quorum.py
@@ -0,0 +1,163 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import sys
+
+from pcs import (
+    usage,
+    utils,
+)
+from pcs.cli.common import parse_args
+from pcs.cli.common.console_report import indent
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.lib.errors import LibraryError
+
+def quorum_cmd(lib, argv, modificators):
+    if len(argv) < 1:
+        usage.quorum()
+        sys.exit(1)
+
+    sub_cmd, argv_next = argv[0], argv[1:]
+    try:
+        if sub_cmd == "help":
+            usage.quorum(argv)
+        elif sub_cmd == "config":
+            quorum_config_cmd(lib, argv_next, modificators)
+        elif sub_cmd == "device":
+            quorum_device_cmd(lib, argv_next, modificators)
+        elif sub_cmd == "update":
+            quorum_update_cmd(lib, argv_next, modificators)
+        else:
+            raise CmdLineInputError()
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(e, "quorum", sub_cmd)
+
+def quorum_device_cmd(lib, argv, modificators):
+    if len(argv) < 1:
+        raise CmdLineInputError()
+
+    sub_cmd, argv_next = argv[0], argv[1:]
+    try:
+        if sub_cmd == "add":
+            quorum_device_add_cmd(lib, argv_next, modificators)
+        elif sub_cmd == "remove":
+            quorum_device_remove_cmd(lib, argv_next, modificators)
+        elif sub_cmd == "update":
+            quorum_device_update_cmd(lib, argv_next, modificators)
+        else:
+            raise CmdLineInputError()
+    except CmdLineInputError as e:
+        utils.exit_on_cmdline_input_errror(
+            e, "quorum", "device {0}".format(sub_cmd)
+        )
+
+def quorum_config_cmd(lib, argv, modificators):
+    if argv:
+        raise CmdLineInputError()
+    config = lib.quorum.get_config()
+    print("\n".join(quorum_config_to_str(config)))
+
+def quorum_config_to_str(config):
+    lines = []
+
+    lines.append("Options:")
+    if "options" in config and config["options"]:
+        lines.extend(indent([
+            "{n}: {v}".format(n=name, v=value)
+            for name, value in sorted(config["options"].items())
+        ]))
+
+    if "device" in config and config["device"]:
+        lines.append("Device:")
+        lines.extend(indent([
+            "{n}: {v}".format(n=name, v=value)
+            for name, value in sorted(
+                config["device"].get("generic_options", {}).items()
+            )
+        ]))
+        model_settings = [
+            "Model: {m}".format(m=config["device"].get("model", ""))
+        ]
+        model_settings.extend(indent([
+            "{n}: {v}".format(n=name, v=value)
+            for name, value in sorted(
+                config["device"].get("model_options", {}).items()
+            )
+        ]))
+        lines.extend(indent(model_settings))
+
+    return lines
+
+def quorum_device_add_cmd(lib, argv, modificators):
+    # we expect "model" keyword once, followed by the actual model value
+    options_lists = parse_args.split_list(argv, "model")
+    if len(options_lists) != 2:
+        raise CmdLineInputError()
+    # check if model value was specified
+    if not options_lists[1] or "=" in options_lists[1][0]:
+        raise CmdLineInputError()
+    generic_options = parse_args.prepare_options(options_lists[0])
+    model = options_lists[1][0]
+    model_options = parse_args.prepare_options(options_lists[1][1:])
+
+    if "model" in generic_options:
+        raise CmdLineInputError(
+            "Model cannot be specified in generic options"
+        )
+
+    lib.quorum.add_device(
+        model,
+        model_options,
+        generic_options,
+        force_model=modificators["force"],
+        force_options=modificators["force"],
+        skip_offline_nodes=modificators["skip_offline_nodes"]
+    )
+
+def quorum_device_remove_cmd(lib, argv, modificators):
+    if argv:
+        raise CmdLineInputError()
+
+    lib.quorum.remove_device(
+        skip_offline_nodes=modificators["skip_offline_nodes"]
+    )
+
+def quorum_device_update_cmd(lib, argv, modificators):
+    # we expect "model" keyword once
+    options_lists = parse_args.split_list(argv, "model")
+    if len(options_lists) == 1:
+        generic_options = parse_args.prepare_options(options_lists[0])
+        model_options = dict()
+    elif len(options_lists) == 2:
+        generic_options = parse_args.prepare_options(options_lists[0])
+        model_options = parse_args.prepare_options(options_lists[1])
+    else:
+        raise CmdLineInputError()
+
+    if "model" in generic_options:
+        raise CmdLineInputError(
+            "Model cannot be specified in generic options"
+        )
+
+    lib.quorum.update_device(
+        model_options,
+        generic_options,
+        force_options=modificators["force"],
+        skip_offline_nodes=modificators["skip_offline_nodes"]
+    )
+
+def quorum_update_cmd(lib, argv, modificators):
+    options = parse_args.prepare_options(argv)
+    if not options:
+        raise CmdLineInputError()
+
+    lib.quorum.set_options(
+        options,
+        skip_offline_nodes=modificators["skip_offline_nodes"]
+    )
diff --git a/pcs/resource.py b/pcs/resource.py
index 022732b..0dfdb03 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -1,7 +1,9 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import sys
 import os
@@ -11,12 +13,21 @@ from xml.dom.minidom import parseString
 import re
 import textwrap
 import time
-
-import usage
-import utils
-import constraint
-import stonith
-import library_acl as lib_acl
+import json
+
+from pcs import (
+    usage,
+    utils,
+    constraint,
+    settings,
+)
+import pcs.lib.cib.acl as lib_acl
+import pcs.lib.pacemaker as lib_pacemaker
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.cli.common.parse_args import prepare_options
+from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker_values import timeout_to_seconds
+import pcs.lib.resource_agent as lib_ra
 
 
 PACEMAKER_WAIT_TIMEOUT_STATUS = 62
@@ -46,7 +57,10 @@ def resource_cmd(argv):
         ra_values, op_values, meta_values, clone_opts = parse_resource_options(
             argv, with_clone=True
         )
-        resource_create(res_id, res_type, ra_values, op_values, meta_values, clone_opts)
+        try:
+            resource_create(res_id, res_type, ra_values, op_values, meta_values, clone_opts)
+        except CmdLineInputError as e:
+            utils.exit_on_cmdline_input_errror(e, "resource", 'create')
     elif (sub_cmd == "move"):
         resource_move(argv)
     elif (sub_cmd == "ban"):
@@ -64,7 +78,10 @@ def resource_cmd(argv):
             usage.resource()
             sys.exit(1)
         res_id = argv.pop(0)
-        resource_update(res_id,argv)
+        try:
+            resource_update(res_id,argv)
+        except CmdLineInputError as e:
+            utils.exit_on_cmdline_input_errror(e, "resource", 'update')
     elif (sub_cmd == "add_operation"):
         utils.err("add_operation has been deprecated, please use 'op add'")
     elif (sub_cmd == "remove_operation"):
@@ -88,11 +105,17 @@ def resource_cmd(argv):
     elif (sub_cmd == "ungroup"):
         resource_group(["remove"] + argv)
     elif (sub_cmd == "clone"):
-        resource_clone(argv)
+        try:
+            resource_clone(argv)
+        except CmdLineInputError as e:
+            utils.exit_on_cmdline_input_errror(e, "resource", 'clone')
     elif (sub_cmd == "unclone"):
         resource_clone_master_remove(argv)
     elif (sub_cmd == "master"):
-        resource_master(argv)
+        try:
+            resource_master(argv)
+        except CmdLineInputError as e:
+            utils.exit_on_cmdline_input_errror(e, "resource", 'master')
     elif (sub_cmd == "enable"):
         resource_enable(argv)
     elif (sub_cmd == "disable"):
@@ -147,11 +170,12 @@ def resource_cmd(argv):
         else:
             set_default("rsc_defaults", argv)
     elif (sub_cmd == "cleanup"):
-        if len(argv) == 0:
-            resource_cleanup_all()
-        else:
-            res_id = argv.pop(0)
-            resource_cleanup(res_id)
+        try:
+            resource_cleanup(argv)
+        except CmdLineInputError as e:
+            utils.exit_on_cmdline_input_errror(e, "resource", 'cleanup')
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
     elif (sub_cmd == "history"):
         resource_history(argv)
     elif (sub_cmd == "relocate"):
@@ -163,6 +187,8 @@ def resource_cmd(argv):
             print_resource_utilization(argv.pop(0))
         else:
             set_resource_utilization(argv.pop(0), argv)
+    elif (sub_cmd == "get_resource_agent_info"):
+        get_resource_agent_info(argv)
     else:
         usage.resource()
         sys.exit(1)
@@ -212,20 +238,14 @@ def parse_resource_options(argv, with_clone=False):
 # List available resources
 # TODO make location more easily configurable
 def resource_list_available(argv):
-    def get_name_and_desc(full_res_name, metadata):
+    def get_name_and_desc(agent_name, shortdesc):
         sd = ""
-        try:
-            dom = parseString(metadata)
-            shortdesc = dom.documentElement.getElementsByTagName("shortdesc")
-            if len(shortdesc) > 0:
-                sd = " - " +  format_desc(
-                    len(full_res_name + " - "),
-                    shortdesc[0].firstChild.nodeValue.strip().replace("\n", " ")
-                )
-        except xml.parsers.expat.ExpatError:
-            sd = ""
-        finally:
-            return full_res_name + sd
+        if len(shortdesc) > 0:
+            sd = " - " + format_desc(
+                len(agent_name + " - "),
+                shortdesc.replace("\n", " ")
+            )
+        return agent_name + sd
 
     ret = []
     if len(argv) != 0:
@@ -234,10 +254,11 @@ def resource_list_available(argv):
         filter_string = ""
 
     # ocf agents
-    os.environ['OCF_ROOT'] = "/usr/lib/ocf/"
-    providers = sorted(os.listdir("/usr/lib/ocf/resource.d"))
+    providers = sorted(os.listdir(settings.ocf_resources))
     for provider in providers:
-        resources = sorted(os.listdir("/usr/lib/ocf/resource.d/" + provider))
+        resources = sorted(os.listdir(os.path.join(
+            settings.ocf_resources, provider
+        )))
         for resource in resources:
             if resource.startswith(".") or resource == "ocf-shellfuncs":
                 continue
@@ -249,13 +270,16 @@ def resource_list_available(argv):
                 ret.append(full_res_name)
                 continue
 
-            metadata = utils.get_metadata("/usr/lib/ocf/resource.d/" + provider + "/" + resource)
-            if metadata == False:
-                continue
-            ret.append(get_name_and_desc(
-                "ocf:" + provider + ":" + resource,
-                metadata
-            ))
+            try:
+                metadata = lib_ra.get_resource_agent_metadata(
+                    utils.cmd_runner(), full_res_name
+                )
+                ret.append(get_name_and_desc(
+                    full_res_name,
+                    lib_ra.get_agent_desc(metadata)["shortdesc"]
+                ))
+            except LibraryError:
+                pass
 
     # lsb agents
     lsb_dir = "/etc/init.d/"
@@ -266,7 +290,7 @@ def resource_list_available(argv):
 
     # systemd agents
     if utils.is_systemctl():
-        agents, retval = utils.run(["systemctl", "list-unit-files", "--full"])
+        agents, dummy_retval = utils.run(["systemctl", "list-unit-files", "--full"])
         agents = agents.split("\n")
     for agent in agents:
         match = re.search(r'^([\S]*)\.service',agent)
@@ -274,9 +298,8 @@ def resource_list_available(argv):
             ret.append("systemd:" + match.group(1))
 
     # nagios metadata
-    nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata"
-    if os.path.isdir(nagios_metadata_path):
-        for metadata_file in sorted(os.listdir(nagios_metadata_path)):
+    if os.path.isdir(settings.nagios_metadata_path):
+        for metadata_file in sorted(os.listdir(settings.nagios_metadata_path)):
             if metadata_file.startswith("."):
                 continue
             full_res_name = "nagios:" + metadata_file
@@ -286,14 +309,15 @@ def resource_list_available(argv):
                 ret.append(full_res_name)
                 continue
             try:
+                metadata = lib_ra.get_resource_agent_metadata(
+                    utils.cmd_runner(),
+                    full_res_name
+                )
                 ret.append(get_name_and_desc(
                     full_res_name,
-                    open(
-                        os.path.join(nagios_metadata_path, metadata_file),
-                        "r"
-                    ).read()
+                    lib_ra.get_agent_desc(metadata)["shortdesc"]
                 ))
-            except EnvironmentError as e:
+            except LibraryError:
                 pass
 
     # output
@@ -313,115 +337,96 @@ def resource_list_available(argv):
     else:
         print("\n".join(ret))
 
-def resource_parse_options(metadata, standard, provider, resource):
-    try:
-        short_desc = ""
-        long_desc = ""
-        dom = parseString(metadata)
-        long_descs = dom.documentElement.getElementsByTagName("longdesc")
-        for ld in long_descs:
-            if ld.parentNode.tagName == "resource-agent" and ld.firstChild:
-                long_desc = ld.firstChild.data.strip()
-                break
-
-        short_descs = dom.documentElement.getElementsByTagName("shortdesc")
-        for sd in short_descs:
-            if sd.parentNode.tagName == "resource-agent" and sd.firstChild:
-                short_desc = sd.firstChild.data.strip()
-                break
-
-        if provider:
-            title_1 = "%s:%s:%s" % (standard, provider, resource)
-        else:
-            title_1 = "%s:%s" % (standard, resource)
 
-        if short_desc:
-            title_1 += " - " + format_desc(len(title_1 + " - "), short_desc)
-        print(title_1)
+def resource_print_options(agent_name, desc, params):
+    if desc["shortdesc"]:
+        agent_name += " - " + format_desc(
+            len(agent_name + " - "), desc["shortdesc"]
+        )
+    print(agent_name)
+    print()
+    if desc["longdesc"]:
+        print(desc["longdesc"])
         print()
-        if long_desc:
-            print(long_desc)
-            print()
 
-        params = dom.documentElement.getElementsByTagName("parameter")
-        if len(params) > 0:
-            print("Resource options:")
-        for param in params:
-            name = param.getAttribute("name")
-            if param.getAttribute("required") == "1":
-                name += " (required)"
-            desc = ""
-            longdesc_els = param.getElementsByTagName("longdesc")
-            if longdesc_els and longdesc_els[0].firstChild:
-                desc = longdesc_els[0].firstChild.nodeValue.strip().replace("\n", " ")
+    if len(params) > 0:
+        print("Resource options:")
+    for param in params:
+        if param.get("advanced", False):
+            continue
+        name = param["name"]
+        if param["required"]:
+            name += " (required)"
+        desc = param["longdesc"].replace("\n", " ")
+        if not desc:
+            desc = param["shortdesc"].replace("\n", " ")
             if not desc:
                 desc = "No description available"
-            indent = name.__len__() + 4
-            desc = format_desc(indent, desc)
-            print("  " + name + ": " + desc)
-    except xml.parsers.expat.ExpatError as e:
-        utils.err("Unable to parse xml for '%s': %s" % (resource, e))
+        indent = len(name) + 4
+        desc = format_desc(indent, desc)
+        print("  " + name + ": " + desc)
+
 
 def resource_list_options(resource):
-    found_resource = False
-    resource = get_full_ra_type(resource,True)
+    runner = utils.cmd_runner()
 
-    # we know this is the nagios resource standard
-    if "nagios:" in resource:
-        resource_split = resource.split(":",2)
-        resource = resource_split[1]
-        standard = "nagios"
-        try:
-            with open("/usr/share/pacemaker/nagios/plugins-metadata/" + resource + ".xml",'r') as f:
-                resource_parse_options(f.read(), standard, None, resource)
-        except IOError as e:
-            utils.err ("Unable to find resource: %s" % resource)
-        return
+    def get_desc_params(agent_name):
+        metadata_dom = lib_ra.get_resource_agent_metadata(
+            runner, agent_name
+        )
+        desc = lib_ra.get_agent_desc(metadata_dom)
+        params = lib_ra.get_resource_agent_parameters(metadata_dom)
+        return desc, params
 
-    # we know this is the nagios resource standard
-    if "ocf:" in resource:
-        resource_split = resource.split(":",3)
-        provider = resource_split[1]
-        resource = resource_split[2]
-        standard = "ocf"
-        metadata = utils.get_metadata("/usr/lib/ocf/resource.d/" + provider + "/" + resource)
-        if metadata:
-            resource_parse_options(metadata, standard, provider, resource)
-        else:
-            utils.err ("Unable to find resource: %s" % resource)
+    found_resource = False
+
+    try:
+        descriptions, parameters = get_desc_params(resource)
+        resource_print_options(resource, descriptions, parameters)
         return
+    except lib_ra.UnsupportedResourceAgent:
+        pass
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
 
     # no standard was give, lets search all ocf providers first
-    providers = sorted(os.listdir("/usr/lib/ocf/resource.d"))
+    providers = sorted(os.listdir(settings.ocf_resources))
     for provider in providers:
-        metadata = utils.get_metadata("/usr/lib/ocf/resource.d/" + provider + "/" + resource)
-        if metadata == False:
+        if not os.path.exists(
+            os.path.join(settings.ocf_resources, provider, resource)
+        ):
             continue
-        else:
-            resource_parse_options(metadata, "ocf", provider, resource)
-            found_resource = True
+        try:
+            agent = "ocf:{0}:{1}".format(provider, resource)
+            descriptions, parameters = get_desc_params(agent)
+            resource_print_options(agent, descriptions, parameters)
+            return
+        except LibraryError:
+            pass
 
     # still not found, now lets look at nagios plugins
     if not found_resource:
         try:
-            with open("/usr/share/pacemaker/nagios/plugins-metadata/" + resource + ".xml",'r') as f:
-                resource_parse_options(f.read(), "nagios", None, resource)
-        except IOError as e:
-            utils.err ("Unable to find resource: %s" % resource)
+            agent = "nagios:" + resource
+            descriptions, parameters = get_desc_params(agent)
+            resource_print_options(agent, descriptions, parameters)
+        except LibraryError:
+            utils.err("Unable to find resource: {0}".format(resource))
 
 # Return the string formatted with a line length of 79 and indented
 def format_desc(indent, desc):
     desc = " ".join(desc.split())
-    rows, columns = utils.getTerminalSize()
+    dummy_rows, columns = utils.getTerminalSize()
     columns = int(columns)
-    if columns < 40: columns = 40
+    if columns < 40:
+        columns = 40
     afterindent = columns - indent
     output = ""
     first = True
 
     for line in textwrap.wrap(desc, afterindent):
         if not first:
-            for i in range(0,indent):
+            for _ in range(0,indent):
                 output += " "
         output += line
         output += "\n"
@@ -455,12 +460,12 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
 
     # If we're not using --force, try to change the case of ra_type to match any
     # installed resources
-    if not "--force" in utils.pcs_options:
+    if "--force" not in utils.pcs_options:
         new_ra_type = utils.is_valid_resource(ra_type, True)
         if new_ra_type != True and new_ra_type != False:
             ra_type = new_ra_type
 
-    if not utils.is_valid_resource(ra_type) and not ("--force" in utils.pcs_options):
+    if not utils.is_valid_resource(ra_type) and "--force" not in utils.pcs_options:
         utils.err ("Unable to create resource '%s', it is not installed on this system (use --force to override)" % ra_type)
 
     if utils.does_id_exist(dom, ra_id):
@@ -481,7 +486,7 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
     # If the user specifies an operation value and we find a similar one in
     # the default operations we remove if from the default operations
     op_values_agent = []
-    if "--no-default-ops" not in utils.pcs_options: 
+    if "--no-default-ops" not in utils.pcs_options:
         default_op_values = utils.get_default_op_values(ra_type)
         for def_op in default_op_values:
             match = False
@@ -505,7 +510,7 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
                 continue
             match = re.match("interval=(.+)", op_setting)
             if match:
-                interval = utils.get_timeout_seconds(match.group(1))
+                interval = timeout_to_seconds(match.group(1))
                 if interval is not None:
                     if interval in action_intervals[op_action]:
                         old_interval = interval
@@ -528,8 +533,6 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
     if not is_monitor_present:
         op_values.append(['monitor'])
 
-    op_values_all = op_values_agent + op_values
-
     if "--disabled" in utils.pcs_options:
         meta_values = [
             meta for meta in meta_values if not meta.startswith("target-role=")
@@ -546,12 +549,20 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
     primitive_values = get_full_ra_type(ra_type)
     primitive_values.insert(0,("id",ra_id))
     meta_attributes = convert_args_to_meta_attrs(meta_values, ra_id)
-    if not "--force" in utils.pcs_options and utils.does_resource_have_options(ra_type):
+    if "--force" not in utils.pcs_options and utils.does_resource_have_options(ra_type):
         params = utils.convert_args_to_tuples(ra_values)
-        bad_opts, missing_req_opts = utils.validInstanceAttributes(ra_id, params , get_full_ra_type(ra_type, True))
+        bad_opts, missing_req_opts = [], []
+        try:
+            bad_opts, missing_req_opts = lib_ra.validate_instance_attributes(
+                utils.cmd_runner(),
+                dict(params),
+                get_full_ra_type(ra_type, True)
+            )
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
         if len(bad_opts) != 0:
             utils.err ("resource option(s): '%s', are not recognized for resource type: '%s' (use --force to override)" \
-                    % (", ".join(bad_opts), get_full_ra_type(ra_type, True)))
+                    % (", ".join(sorted(bad_opts)), get_full_ra_type(ra_type, True)))
         if len(missing_req_opts) != 0:
             utils.err(
                 "missing required option(s): '%s' for resource type: %s"
@@ -571,13 +582,13 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
         )
 
     if "--clone" in utils.pcs_options or len(clone_opts) > 0:
-        dom, clone_id = resource_clone_create(dom, [ra_id] + clone_opts)
+        dom, dummy_clone_id = resource_clone_create(dom, [ra_id] + clone_opts)
         if "--group" in utils.pcs_options:
             print("Warning: --group ignored when creating a clone")
         if "--master" in utils.pcs_options:
             print("Warning: --master ignored when creating a clone")
     elif "--master" in utils.pcs_options:
-        dom, master_id = resource_master_create(
+        dom, dummy_master_id = resource_master_create(
             dom, [ra_id] + master_meta_values
         )
         if "--group" in utils.pcs_options:
@@ -801,7 +812,7 @@ def resource_move(argv,clear=False,ban=False):
             utils.err("\n".join(msg).strip())
 
 def resource_standards(return_output=False):
-    output, retval = utils.run(["crm_resource","--list-standards"], True)
+    output, dummy_retval = utils.run(["crm_resource","--list-standards"], True)
     # Return value is ignored because it contains the number of standards
     # returned, not an error code
     output = output.strip()
@@ -810,7 +821,7 @@ def resource_standards(return_output=False):
     print(output)
 
 def resource_providers():
-    output, retval = utils.run(["crm_resource","--list-ocf-providers"],True)
+    output, dummy_retval = utils.run(["crm_resource","--list-ocf-providers"],True)
     # Return value is ignored because it contains the number of providers
     # returned, not an error code
     print(output.strip())
@@ -826,7 +837,7 @@ def resource_agents(argv):
         standards = output.split('\n')
 
     for s in standards:
-        output, retval = utils.run(["crm_resource", "--list-agents", s])
+        output, dummy_retval = utils.run(["crm_resource", "--list-agents", s])
         preg = re.compile(r'\d+ agents found for standard.*$', re.MULTILINE)
         output = preg.sub("", output)
         output = output.strip()
@@ -846,39 +857,22 @@ def resource_update(res_id,args):
         wait_timeout = utils.validate_wait_get_timeout()
         wait = True
 
-    resource = None
-    for r in dom.getElementsByTagName("primitive"):
-        if r.getAttribute("id") == res_id:
-            resource = r
-            break
-
+    resource = utils.dom_get_resource(dom, res_id)
     if not resource:
-        clone = None
-        for c in dom.getElementsByTagName("clone"):
-            if c.getAttribute("id") == res_id:
-                clone = r
-                break
-
+        clone = utils.dom_get_clone(dom, res_id)
         if clone:
-            for a in c.childNodes:
-                if a.localName == "primitive" or a.localName == "group":
-                    return resource_update_clone_master(
-                        dom, clone, "clone", a.getAttribute("id"), args,
-                        wait, wait_timeout
-                    )
-
-        master = None
-        for m in dom.getElementsByTagName("master"):
-            if m.getAttribute("id") == res_id:
-                master = r 
-                break
-
+            clone_child = utils.dom_elem_get_clone_ms_resource(clone)
+            if clone_child:
+                child_id = clone_child.getAttribute("id")
+                return resource_update_clone_master(
+                    dom, clone, "clone", child_id, args, wait, wait_timeout
+                )
+        master = utils.dom_get_master(dom, res_id)
         if master:
             return resource_update_clone_master(
                 dom, master, "master", res_id, args, wait, wait_timeout
             )
-
-        utils.err ("Unable to find resource: %s" % res_id)
+        utils.err("Unable to find resource: %s" % res_id)
 
     instance_attributes = resource.getElementsByTagName("instance_attributes")
     if len(instance_attributes) == 0:
@@ -889,7 +883,7 @@ def resource_update(res_id,args):
         instance_attributes = instance_attributes[0]
 
     params = utils.convert_args_to_tuples(ra_values)
-    if not "--force" in utils.pcs_options and (resource.getAttribute("class") == "ocf" or resource.getAttribute("class") == "stonith"):
+    if "--force" not in utils.pcs_options and (resource.getAttribute("class") == "ocf" or resource.getAttribute("class") == "stonith"):
         resClass = resource.getAttribute("class")
         resProvider = resource.getAttribute("provider")
         resType = resource.getAttribute("type")
@@ -897,10 +891,16 @@ def resource_update(res_id,args):
             resource_type = resClass + ":" + resType
         else:
             resource_type = resClass + ":" + resProvider + ":" + resType
-        bad_opts, missing_req_opts = utils.validInstanceAttributes(res_id, params, resource_type)
+        bad_opts = []
+        try:
+            bad_opts, _ = lib_ra.validate_instance_attributes(
+                utils.cmd_runner(), dict(params), resource_type
+            )
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
         if len(bad_opts) != 0:
             utils.err ("resource option(s): '%s', are not recognized for resource type: '%s' (use --force to override)" \
-                    % (", ".join(bad_opts), utils.getResourceType(resource)))
+                    % (", ".join(sorted(bad_opts)), utils.getResourceType(resource)))
 
 
     for (key,val) in params:
@@ -1017,11 +1017,11 @@ def resource_update_clone_master(
     dom, clone, clone_type, res_id, args, wait, wait_timeout
 ):
     if clone_type == "clone":
-        dom, clone_id = resource_clone_create(dom, [res_id] + args, True)
+        dom, dummy_clone_id = resource_clone_create(dom, [res_id] + args, True)
     elif clone_type == "master":
-        dom, master_id = resource_master_create(dom, [res_id] + args, True)
+        dom, dummy_master_id = resource_master_create(dom, [res_id] + args, True)
 
-    dom = utils.replace_cib_configuration(dom)
+    utils.replace_cib_configuration(dom)
 
     if wait:
         args = ["crm_resource", "--wait"]
@@ -1125,7 +1125,7 @@ def resource_operation_add(
                     "operation %s with interval %ss already specified for %s:\n%s"
                     % (
                         op_el.getAttribute("name"),
-                        utils.get_timeout_seconds(
+                        timeout_to_seconds(
                             op_el.getAttribute("interval"), True
                         ),
                         res_id,
@@ -1431,21 +1431,17 @@ def resource_clone_create(cib_dom, argv, update_existing=False):
         if element.parentNode.tagName != "clone":
             utils.err("%s is not currently a clone" % name)
         clone = element.parentNode
-        for child in clone.childNodes:
-            if (
-                child.nodeType == child.ELEMENT_NODE
-                and
-                child.tagName == "meta_attributes"
-            ):
-                meta = child
-                break
     else:
         clone = cib_dom.createElement("clone")
         clone.setAttribute("id", utils.find_unique_id(cib_dom, name + "-clone"))
         clone.appendChild(element)
         re.appendChild(clone)
 
-    utils.dom_update_meta_attr(clone, utils.convert_args_to_tuples(argv))
+    generic_values, op_values, meta_values = parse_resource_options(argv)
+    if op_values:
+        utils.err("op settings must be changed on base resource, not the clone")
+    final_meta = prepare_options(generic_values + meta_values)
+    utils.dom_update_meta_attr(clone, sorted(final_meta.items()))
 
     return cib_dom, clone.getAttribute("id")
 
@@ -1605,10 +1601,12 @@ def resource_master_create(dom, argv, update=False, master_id=None):
         resources.appendChild(master_element)
 
     if len(argv) > 0:
-        utils.dom_update_meta_attr(
-            master_element,
-            utils.convert_args_to_tuples(argv)
-        )
+        generic_values, op_values, meta_values = parse_resource_options(argv)
+        if op_values:
+            utils.err("op settings must be changed on base resource, not the master")
+        final_meta = prepare_options(generic_values + meta_values)
+        utils.dom_update_meta_attr(master_element, list(final_meta.items()))
+
     return dom, master_element.getAttribute("id")
 
 def resource_master_remove(argv):
@@ -1635,12 +1633,11 @@ def resource_master_remove(argv):
             break
 
     if not master_found:
-            utils.err("Unable to find multi-state resource with id %s" % master_id)
+        utils.err("Unable to find multi-state resource with id %s" % master_id)
 
     constraints_element = dom.getElementsByTagName("constraints")
     if len(constraints_element) > 0:
         constraints_element = constraints_element[0]
-        constraints = []
         for resource_id in resources_to_cleanup:
             remove_resource_references(
                 dom, resource_id, constraints_element=constraints_element
@@ -1661,7 +1658,7 @@ def resource_remove(resource_id, output = True):
         group_dom = parseString(group)
         print("Stopping all resources in group: %s..." % resource_id)
         resource_disable([resource_id])
-        if not "--force" in utils.pcs_options and not utils.usefile:
+        if "--force" not in utils.pcs_options and not utils.usefile:
             output, retval = utils.run(["crm_resource", "--wait"])
             if retval != 0 and "unrecognized option '--wait'" in output:
                 output = ""
@@ -1671,7 +1668,7 @@ def resource_remove(resource_id, output = True):
                 ):
                     res_id = res.getAttribute("id")
                     res_stopped = False
-                    for i in range(15):
+                    for _ in range(15):
                         time.sleep(1)
                         if not utils.resource_running_on(res_id)["is_running"]:
                             res_stopped = True
@@ -1712,7 +1709,7 @@ def resource_remove(resource_id, output = True):
         num_resources_in_group = len(parseString(group).documentElement.getElementsByTagName("primitive"))
 
     if (
-        not "--force" in utils.pcs_options
+        "--force" not in utils.pcs_options
         and
         not utils.usefile
         and
@@ -1725,7 +1722,7 @@ def resource_remove(resource_id, output = True):
         if retval != 0 and "unrecognized option '--wait'" in output:
             output = ""
             retval = 0
-            for i in range(15):
+            for _ in range(15):
                 time.sleep(1)
                 if not utils.resource_running_on(resource_id)["is_running"]:
                     break
@@ -1810,21 +1807,39 @@ def resource_remove(resource_id, output = True):
         args = ["cibadmin", "-o", "resources", "-D", "--xpath", to_remove_xpath]
         if output == True:
             print("Deleting Resource ("+msg+") - " + resource_id)
-        cmdoutput,retVal = utils.run(args)
+        dummy_cmdoutput,retVal = utils.run(args)
         if retVal != 0:
             if output == True:
                 utils.err("Unable to remove resource '%s' (do constraints exist?)" % (resource_id))
             return False
     return True
 
+def stonith_level_rm_device(cib_dom, stn_id):
+    topology_el_list = cib_dom.getElementsByTagName("fencing-topology")
+    if not topology_el_list:
+        return cib_dom
+    topology_el = topology_el_list[0]
+    for level_el in topology_el.getElementsByTagName("fencing-level"):
+        device_list = level_el.getAttribute("devices").split(",")
+        if stn_id in device_list:
+            new_device_list = [dev for dev in device_list if dev != stn_id]
+            if new_device_list:
+                level_el.setAttribute("devices", ",".join(new_device_list))
+            else:
+                level_el.parentNode.removeChild(level_el)
+    if not topology_el.getElementsByTagName("fencing-level"):
+        topology_el.parentNode.removeChild(topology_el)
+    return cib_dom
+
+
 def remove_resource_references(
     dom, resource_id, output=False, constraints_element=None
 ):
     constraint.remove_constraints_containing(
         resource_id, output, constraints_element, dom
     )
-    stonith.stonith_level_rm_device(dom, resource_id)
-    lib_acl.remove_permissions_referencing(dom, resource_id)
+    stonith_level_rm_device(dom, resource_id)
+    lib_acl.dom_remove_permissions_referencing(dom, resource_id)
     return dom
 
 # This removes a resource from a group, but keeps it in the config
@@ -2349,7 +2364,7 @@ def resource_failcount(argv):
         sys.exit(1)
 
     if len(argv) > 0:
-        node = argv.pop(0) 
+        node = argv.pop(0)
         all_nodes = False
     else:
         all_nodes = True
@@ -2491,7 +2506,7 @@ def print_operations(node, spaces):
         else:
             first = False
         output += op.attrib["name"] + " "
-        for attr,val in op.attrib.items():
+        for attr,val in sorted(op.attrib.items()):
             if attr in ["id","name"] :
                 continue
             output += attr + "=" + val + " "
@@ -2508,7 +2523,7 @@ def print_operations(node, spaces):
 def operation_to_string(op_el):
     parts = []
     parts.append(op_el.getAttribute("name"))
-    for name, value in op_el.attributes.items():
+    for name, value in sorted(op_el.attributes.items()):
         if name in ["id", "name"]:
             continue
         parts.append(name + "=" + value)
@@ -2530,24 +2545,25 @@ def get_attrs(node, prepend_string = "", append_string = ""):
     else:
         return output.rstrip()
 
-def resource_cleanup(res_id):
-    (output, retval) = utils.run(["crm_resource", "-C", "-r", res_id])
-    if retval != 0:
-        utils.err("Unable to cleanup resource: %s" % res_id + "\n" + output)
-    else:
-        print(output)
+def resource_cleanup(argv):
+    resource = None
+    node = None
 
-def resource_cleanup_all():
-    (output, retval) = utils.run(["crm_resource", "-C"])
-    if retval != 0:
-        utils.err("Unexpected error occured. 'crm_resource -C' err_code: %s\n%s" % (retval, output))
-    else:
-        print(output)
+    if len(argv) > 1:
+        raise CmdLineInputError()
+    if argv:
+        resource = argv[0]
+    if "--node" in utils.pcs_options:
+        node = utils.pcs_options["--node"]
+    force = "--force" in utils.pcs_options
+
+    print(lib_pacemaker.resource_cleanup(
+        utils.cmd_runner(), resource, node, force
+    ))
 
 def resource_history(args):
     dom = utils.get_cib_dom()
     resources = {}
-    calls = {}
     lrm_res = dom.getElementsByTagName("lrm_resource")
     for res in lrm_res:
         res_id = res.getAttribute("id")
@@ -2559,10 +2575,10 @@ def resource_history(args):
     for res in sorted(resources):
         print("Resource: %s" % res)
         for cid in sorted(resources[res]):
-            (last_date,retval) = utils.run(["date","-d", "@" + resources[res][cid][1].getAttribute("last-rc-change")])
+            (last_date, dummy_retval) = utils.run(["date","-d", "@" + resources[res][cid][1].getAttribute("last-rc-change")])
             last_date = last_date.rstrip()
             rc_code = resources[res][cid][1].getAttribute("rc-code")
-            operation = resources[res][cid][1].getAttribute("operation") 
+            operation = resources[res][cid][1].getAttribute("operation")
             if rc_code != "0":
                 print("  Failed on %s" % last_date)
             elif operation == "stop":
@@ -2647,7 +2663,7 @@ def resource_relocate_get_locations(cib_dom, resources=None):
     updated_cib, updated_resources = resource_relocate_set_stickiness(
         cib_dom, resources
     )
-    simout, transitions, new_cib = utils.simulate_cib(updated_cib)
+    dummy_simout, transitions, new_cib = utils.simulate_cib(updated_cib)
     operation_list = utils.get_operations_from_transitions(transitions)
     locations = utils.get_resources_location_from_operations(
         new_cib, operation_list
@@ -2662,8 +2678,8 @@ def resource_relocate_get_locations(cib_dom, resources=None):
     ]
 
 def resource_relocate_show(cib_dom):
-    updated_cib, updated_resources = resource_relocate_set_stickiness(cib_dom)
-    simout, transitions, new_cib = utils.simulate_cib(updated_cib)
+    updated_cib, dummy_updated_resources = resource_relocate_set_stickiness(cib_dom)
+    simout, dummy_transitions, dummy_new_cib = utils.simulate_cib(updated_cib)
     in_status = False
     in_status_resources = False
     in_transitions = False
@@ -2793,8 +2809,29 @@ def print_resources_utilization():
     for resource_el in cib.getElementsByTagName("primitive"):
         u = utils.get_utilization_str(resource_el)
         if u:
-           utilization[resource_el.getAttribute("id")] = u
+            utilization[resource_el.getAttribute("id")] = u
 
     print("Resource Utilization:")
     for resource in sorted(utilization):
         print(" {0}: {1}".format(resource, utilization[resource]))
+
+
+def get_resource_agent_info(argv):
+    if len(argv) != 1:
+        utils.err("One parameter expected")
+
+    agent = argv[0]
+    try:
+        metadata_dom = lib_ra.get_resource_agent_metadata(
+            utils.cmd_runner(),
+            agent
+        )
+        metadata = lib_ra.get_agent_desc(metadata_dom)
+        metadata["name"] = agent
+        metadata["parameters"] = lib_ra.get_resource_agent_parameters(
+            metadata_dom
+        )
+
+        print(json.dumps(metadata))
+    except lib_ra.LibraryError as e:
+        utils.process_library_reports(e.args)
diff --git a/pcs/rule.py b/pcs/rule.py
index 92407ef..896c1ad 100644
--- a/pcs/rule.py
+++ b/pcs/rule.py
@@ -1,12 +1,14 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import re
 import xml.dom.minidom
 
-import utils
+from pcs import utils
 
 
 # main functions
diff --git a/pcs/settings.py b/pcs/settings.py
index e329e3f..83992e6 100644
--- a/pcs/settings.py
+++ b/pcs/settings.py
@@ -1,23 +1 @@
-pacemaker_binaries = "/usr/sbin/"
-corosync_binaries = "/usr/sbin/"
-ccs_binaries = "/usr/sbin/"
-corosync_conf_file = "/etc/corosync/corosync.conf"
-cluster_conf_file = "/etc/cluster/cluster.conf"
-fence_agent_binaries = "/usr/sbin/"
-pengine_binary = "/usr/libexec/pacemaker/pengine"
-crmd_binary = "/usr/libexec/pacemaker/crmd"
-cib_binary = "/usr/libexec/pacemaker/cib"
-stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.149"
-crm_report = pacemaker_binaries + "crm_report"
-crm_verify = pacemaker_binaries + "crm_verify"
-pcsd_cert_location = "/var/lib/pcsd/pcsd.crt"
-pcsd_key_location = "/var/lib/pcsd/pcsd.key"
-pcsd_tokens_location = "/var/lib/pcsd/tokens"
-pcsd_users_conf_location = "/var/lib/pcsd/pcs_users.conf"
-pcsd_settings_conf_location = "/var/lib/pcsd/pcs_settings.conf"
-pcsd_exec_location = "/usr/lib/pcsd/"
-corosync_uidgid_dir = "/etc/corosync/uidgid.d/"
-cib_dir = "/var/lib/pacemaker/cib/"
-pacemaker_uname = "hacluster"
-pacemaker_gname = "haclient"
+from pcs.settings_default import *
diff --git a/pcs/settings.py.debian b/pcs/settings.py.debian
index 3e3e6cf..a49e123 100644
--- a/pcs/settings.py.debian
+++ b/pcs/settings.py.debian
@@ -1,23 +1,6 @@
-pacemaker_binaries = "/usr/sbin/"
-corosync_binaries = "/usr/sbin/"
-ccs_binaries = "/usr/sbin/"
-corosync_conf_file = "/etc/corosync/corosync.conf"
-cluster_conf_file = "/etc/cluster/cluster.conf"
-fence_agent_binaries = "/usr/sbin/"
+from pcs.settings_default import *
 pengine_binary = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/pengine"
 crmd_binary = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/crmd"
 cib_binary = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/cib"
 stonithd_binary = "/usr/lib/DEB_HOST_MULTIARCH/pacemaker/stonithd"
-pcs_version = "0.9.149"
-crm_report = pacemaker_binaries + "crm_report"
-crm_verify = pacemaker_binaries + "crm_verify"
-pcsd_cert_location = "/var/lib/pcsd/pcsd.crt"
-pcsd_key_location = "/var/lib/pcsd/pcsd.key"
-pcsd_tokens_location = "/var/lib/pcsd/tokens"
-pcsd_users_conf_location = "/var/lib/pcsd/pcs_users.conf"
-pcsd_settings_conf_location = "/var/lib/pcsd/pcs_settings.conf"
 pcsd_exec_location = "/usr/share/pcsd/"
-corosync_uidgid_dir = "/etc/corosync/uidgid.d/"
-cib_dir = "/var/lib/pacemaker/cib/"
-pacemaker_uname = "hacluster"
-pacemaker_gname = "haclient"
diff --git a/pcs/settings.py b/pcs/settings_default.py
similarity index 79%
copy from pcs/settings.py
copy to pcs/settings_default.py
index e329e3f..6b9c728 100644
--- a/pcs/settings.py
+++ b/pcs/settings_default.py
@@ -1,3 +1,5 @@
+import os.path
+
 pacemaker_binaries = "/usr/sbin/"
 corosync_binaries = "/usr/sbin/"
 ccs_binaries = "/usr/sbin/"
@@ -8,9 +10,10 @@ pengine_binary = "/usr/libexec/pacemaker/pengine"
 crmd_binary = "/usr/libexec/pacemaker/crmd"
 cib_binary = "/usr/libexec/pacemaker/cib"
 stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.149"
+pcs_version = "0.9.151"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
+crm_mon_schema = '/usr/share/pacemaker/crm_mon.rng'
 pcsd_cert_location = "/var/lib/pcsd/pcsd.crt"
 pcsd_key_location = "/var/lib/pcsd/pcsd.key"
 pcsd_tokens_location = "/var/lib/pcsd/tokens"
@@ -21,3 +24,6 @@ corosync_uidgid_dir = "/etc/corosync/uidgid.d/"
 cib_dir = "/var/lib/pacemaker/cib/"
 pacemaker_uname = "hacluster"
 pacemaker_gname = "haclient"
+ocf_root = "/usr/lib/ocf/"
+ocf_resources = os.path.join(ocf_root, "resource.d/")
+nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata/"
diff --git a/pcs/status.py b/pcs/status.py
index 25817b0..0e5e0e7 100644
--- a/pcs/status.py
+++ b/pcs/status.py
@@ -1,17 +1,20 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import sys
 import os
 
-import resource
-import cluster
-import settings
-import usage
-import utils
-
+from pcs import (
+    resource,
+    usage,
+    utils,
+)
+from pcs.lib.errors import LibraryError
+from pcs.lib.pacemaker_state import ClusterState
 
 def status_cmd(argv):
     if len(argv) == 0:
@@ -30,7 +33,7 @@ def status_cmd(argv):
     elif (sub_cmd == "nodes"):
         nodes_status(argv)
     elif (sub_cmd == "pcsd"):
-        cluster.cluster_gui_status(argv)
+        cluster_pcsd_status(argv)
     elif (sub_cmd == "xml"):
         xml_status()
     elif (sub_cmd == "corosync"):
@@ -69,8 +72,9 @@ def full_status():
     print(output)
 
     if not utils.usefile:
-        print_pcsd_daemon_status()
-        print()
+        if  "--full" in utils.pcs_options:
+            print_pcsd_daemon_status()
+            print()
         utils.serviceStatus("  ")
 
 # Parse crm_mon for status
@@ -87,7 +91,14 @@ def nodes_status(argv):
 
     if len(argv) == 1 and (argv[0] == "config"):
         corosync_nodes = utils.getNodesFromCorosyncConf()
-        pacemaker_nodes = utils.getNodesFromPacemaker()
+        try:
+            pacemaker_nodes = sorted([
+                node.attrs.name for node
+                in ClusterState(utils.getClusterStateXml()).node_section.nodes
+                if node.attrs.type != 'remote'
+            ])
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
         print("Corosync Nodes:")
         if corosync_nodes:
             print(" " + " ".join(corosync_nodes))
@@ -102,9 +113,7 @@ def nodes_status(argv):
         online_nodes = utils.getCorosyncActiveNodes()
         offline_nodes = []
         for node in all_nodes:
-            if node in online_nodes:
-                next
-            else:
+            if node not in online_nodes:
                 offline_nodes.append(node)
 
         online_nodes.sort()
@@ -224,31 +233,31 @@ def xml_status():
 
 def is_cman_running():
     if utils.is_systemctl():
-        output, retval = utils.run(["systemctl", "status", "cman.service"])
+        dummy_output, retval = utils.run(["systemctl", "status", "cman.service"])
     else:
-        output, retval = utils.run(["service", "cman", "status"])
+        dummy_output, retval = utils.run(["service", "cman", "status"])
     return retval == 0
 
 def is_corosyc_running():
     if utils.is_systemctl():
-        output, retval = utils.run(["systemctl", "status", "corosync.service"])
+        dummy_output, retval = utils.run(["systemctl", "status", "corosync.service"])
     else:
-        output, retval = utils.run(["service", "corosync", "status"])
+        dummy_output, retval = utils.run(["service", "corosync", "status"])
     return retval == 0
 
 def is_pacemaker_running():
     if utils.is_systemctl():
-        output, retval = utils.run(["systemctl", "status", "pacemaker.service"])
+        dummy_output, retval = utils.run(["systemctl", "status", "pacemaker.service"])
     else:
-        output, retval = utils.run(["service", "pacemaker", "status"])
+        dummy_output, retval = utils.run(["service", "pacemaker", "status"])
     return retval == 0
 
 def print_pcsd_daemon_status():
     print("PCSD Status:")
     if os.getuid() == 0:
-        cluster.cluster_gui_status([], True)
+        cluster_pcsd_status([], True)
     else:
-        err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd(
+        err_msgs, exitcode, std_out, dummy_std_err = utils.call_local_pcsd(
             ['status', 'pcsd'], True
         )
         if err_msgs:
@@ -259,3 +268,49 @@ def print_pcsd_daemon_status():
         else:
             print("Unable to get PCSD status")
 
+def check_nodes(node_list, prefix=""):
+    """
+    Print pcsd status on node_list, return if there is any pcsd not online
+    """
+    if not utils.is_rhel6():
+        pm_nodes = utils.getPacemakerNodesID(allow_failure=True)
+        cs_nodes = utils.getCorosyncNodesID(allow_failure=True)
+
+    STATUS_ONLINE = 0
+    status_desc_map = {
+        STATUS_ONLINE: 'Online',
+        3: 'Unable to authenticate'
+    }
+    status_list = []
+    def report(node, returncode, output):
+        print("{0}{1}: {2}".format(
+            prefix,
+            node if utils.is_rhel6() else utils.prepare_node_name(
+                node, pm_nodes, cs_nodes
+            ),
+            status_desc_map.get(returncode, 'Offline')
+        ))
+        status_list.append(returncode)
+
+    utils.run_parallel(
+        utils.create_task_list(report, utils.checkAuthorization, node_list)
+    )
+
+    return any([status != STATUS_ONLINE for status in status_list])
+
+# If no arguments get current cluster node status, otherwise get listed
+# nodes status
+def cluster_pcsd_status(argv, dont_exit=False):
+    bad_nodes = False
+    if len(argv) == 0:
+        nodes = utils.getNodesFromCorosyncConf()
+        if len(nodes) == 0:
+            if utils.is_rhel6():
+                utils.err("no nodes found in cluster.conf")
+            else:
+                utils.err("no nodes found in corosync.conf")
+        bad_nodes = check_nodes(nodes, "  ")
+    else:
+        bad_nodes = check_nodes(argv, "  ")
+    if bad_nodes and not dont_exit:
+        sys.exit(2)
diff --git a/pcs/stonith.py b/pcs/stonith.py
index d11508a..5937323 100644
--- a/pcs/stonith.py
+++ b/pcs/stonith.py
@@ -1,16 +1,24 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import sys
 import re
 import glob
-from xml.dom.minidom import parseString
-
-import usage
-import utils
-import resource
+import json
+import os
+
+from pcs import (
+    resource,
+    usage,
+    utils,
+)
+from pcs.cli.common.errors import CmdLineInputError
+from pcs.lib.errors import LibraryError
+import pcs.lib.resource_agent as lib_ra
 
 def stonith_cmd(argv):
     if len(argv) == 0:
@@ -51,13 +59,16 @@ def stonith_cmd(argv):
     elif (sub_cmd == "fence"):
         stonith_fence(argv)
     elif (sub_cmd == "cleanup"):
-        if len(argv) == 0:
-            resource.resource_cleanup_all()
-        else:
-            res_id = argv.pop(0)
-            resource.resource_cleanup(res_id)
+        try:
+            resource.resource_cleanup(argv)
+        except CmdLineInputError as e:
+            utils.exit_on_cmdline_input_errror(e, "stonith", 'cleanup')
+        except LibraryError as e:
+            utils.process_library_reports(e.args)
     elif (sub_cmd == "confirm"):
         stonith_confirm(argv)
+    elif (sub_cmd == "get_fence_agent_info"):
+        get_fence_agent_info(argv)
     else:
         usage.stonith()
         sys.exit(1)
@@ -88,85 +99,34 @@ def stonith_list_available(argv):
 
     for fd in fence_devices_filtered:
         sd = ""
-        fd_name = fd[10:]
-        if not "--nodesc" in utils.pcs_options:
-            metadata = utils.get_stonith_metadata(fd)
-            if metadata == False:
-                utils.err("no metadata for %s" % fd, False)
-                continue
+        agent_name = os.path.basename(fd)
+        if "--nodesc" not in utils.pcs_options:
             try:
-                dom = parseString(metadata)
-            except Exception:
+                metadata = lib_ra.get_fence_agent_metadata(
+                    utils.cmd_runner(), agent_name
+                )
+                shortdesc = lib_ra.get_agent_desc(metadata)["shortdesc"]
+                if shortdesc:
+                    sd = " - " + resource.format_desc(
+                        len(agent_name) + 3, shortdesc
+                    )
+            except LibraryError as e:
                 utils.err(
-                    "unable to parse metadata for fence agent: %s" % (fd_name),
-                    False
+                    e.args[-1].message, False
                 )
                 continue
-            ra = dom.documentElement
-            shortdesc = ra.getAttribute("shortdesc")
-
-            if len(shortdesc) > 0:
-                sd = " - " +  resource.format_desc(fd_name.__len__() + 3, shortdesc)
-        print(fd_name + sd)
+        print(agent_name + sd)
 
 def stonith_list_options(stonith_agent):
-    metadata = utils.get_stonith_metadata(utils.fence_bin + stonith_agent)
-    if not metadata:
-        utils.err("unable to get metadata for %s" % stonith_agent)
+    runner = utils.cmd_runner()
     try:
-        dom = parseString(metadata)
-    except xml.parsers.expat.ExpatError as e:
-        utils.err("Unable to parse xml for '%s': %s" % (stonith_agent, e))
-
-    title = dom.documentElement.getAttribute("name") or stonith_agent
-    short_desc = dom.documentElement.getAttribute("shortdesc")
-    if not short_desc:
-        for sd in dom.documentElement.getElementsByTagName("shortdesc"):
-            if sd.parentNode.tagName == "resource-agent" and sd.firstChild:
-                short_desc = sd.firstChild.data.strip()
-                break
-    long_desc = ""
-    for ld in dom.documentElement.getElementsByTagName("longdesc"):
-        if ld.parentNode.tagName == "resource-agent" and ld.firstChild:
-            long_desc = ld.firstChild.data.strip()
-            break
-
-    if short_desc:
-        title += " - " + resource.format_desc(len(title + " - "), short_desc)
-    print(title)
-    print()
-    if long_desc:
-        print(long_desc)
-        print()
-    print("Stonith options:")
-
-    params = dom.documentElement.getElementsByTagName("parameter")
-    for param in params:
-        name = param.getAttribute("name")
-        if param.getAttribute("required") == "1":
-            name += " (required)"
-        desc = ""
-        shortdesc_els = param.getElementsByTagName("shortdesc")
-        if shortdesc_els and shortdesc_els[0].firstChild:
-            desc = shortdesc_els[0].firstChild.nodeValue.strip().replace("\n", " ")
-        if not desc:
-            desc = "No description available"
-        indent = name.__len__() + 4
-        desc = resource.format_desc(indent, desc)
-        print("  " + name + ": " + desc)
-
-    default_stonith_options = utils.get_default_stonith_options()
-    for do in default_stonith_options:
-        name = do.attrib["name"]
-        desc = ""
-        if len(do.findall(str("shortdesc"))) > 0:
-            if do.findall(str("shortdesc"))[0].text:
-                desc = do.findall(str("shortdesc"))[0].text.strip()
-        if not desc:
-            desc = "No description available"
-        indent = len(name) + 4
-        desc = resource.format_desc(indent, desc)
-        print("  " + name + ": " + desc)
+        metadata = lib_ra.get_fence_agent_metadata(runner, stonith_agent)
+        desc = lib_ra.get_agent_desc(metadata)
+        params = lib_ra.get_fence_agent_parameters(runner, metadata)
+        resource.resource_print_options(stonith_agent, desc, params)
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
+
 
 def stonith_create(argv):
     if len(argv) < 2:
@@ -178,13 +138,19 @@ def stonith_create(argv):
     st_values, op_values, meta_values = resource.parse_resource_options(
         argv, with_clone=False
     )
-    metadata = utils.get_stonith_metadata("/usr/sbin/" + stonith_type)
-    if metadata:
+
+    try:
+        metadata = lib_ra.get_fence_agent_metadata(
+            utils.cmd_runner(), stonith_type
+        )
         if stonith_does_agent_provide_unfencing(metadata):
             meta_values = [
                 meta for meta in meta_values if not meta.startswith("provides=")
             ]
             meta_values.append("provides=unfencing")
+    except LibraryError as e:
+        utils.process_library_reports(e.args)
+
     resource.resource_create(
         stonith_id, "stonith:" + stonith_type, st_values, op_values, meta_values
     )
@@ -205,7 +171,7 @@ def stonith_level(argv):
         if len(argv) < 1:
             usage.stonith(["level remove"])
             sys.exit(1)
-        
+
         node = ""
         devices = ""
         if len(argv) == 2:
@@ -233,7 +199,7 @@ def stonith_level_add(level, node, devices):
     if not re.search(r'^\d+$', level) or re.search(r'^0+$', level):
         utils.err("invalid level '{0}', use a positive integer".format(level))
     level = level.lstrip('0')
-    if not "--force" in utils.pcs_options:
+    if "--force" not in utils.pcs_options:
         for dev in devices.split(","):
             if not utils.is_stonith_resource(dev):
                 utils.err("%s is not a stonith id (use --force to override)" % dev)
@@ -277,7 +243,6 @@ def stonith_level_rm(level, node, devices):
         ft = ft[0]
 
     fls = ft.getElementsByTagName("fencing-level")
-    fls_to_remove = []
 
     if node != "":
         if devices != "":
@@ -310,23 +275,6 @@ def stonith_level_rm(level, node, devices):
 
     utils.replace_cib_configuration(dom)
 
-def stonith_level_rm_device(cib_dom, stn_id):
-    topology_el_list = cib_dom.getElementsByTagName("fencing-topology")
-    if not topology_el_list:
-        return cib_dom
-    topology_el = topology_el_list[0]
-    for level_el in topology_el.getElementsByTagName("fencing-level"):
-        device_list = level_el.getAttribute("devices").split(",")
-        if stn_id in device_list:
-            new_device_list = [dev for dev in device_list if dev != stn_id]
-            if new_device_list:
-                level_el.setAttribute("devices", ",".join(new_device_list))
-            else:
-                level_el.parentNode.removeChild(level_el)
-    if not topology_el.getElementsByTagName("fencing-level"):
-        topology_el.parentNode.removeChild(topology_el)
-    return cib_dom
-
 def stonith_level_clear(node = None):
     dom = utils.get_cib_dom()
     ft = dom.getElementsByTagName("fencing-topology")
@@ -355,7 +303,6 @@ def stonith_level_verify():
     fls = dom.getElementsByTagName("fencing-level")
     for fl in fls:
         node = fl.getAttribute("target")
-        level = fl.getAttribute("index")
         devices = fl.getAttribute("devices")
         for dev in devices.split(","):
             if not utils.is_stonith_resource(dev):
@@ -418,23 +365,40 @@ def stonith_confirm(argv):
     else:
         print("Node: %s confirmed fenced" % node)
 
-def stonith_does_agent_provide_unfencing(metadata_string):
-    try:
-        dom = parseString(metadata_string)
-        for agent in utils.dom_get_children_by_tag_name(dom, "resource-agent"):
-            for actions in utils.dom_get_children_by_tag_name(agent, "actions"):
-                for action in utils.dom_get_children_by_tag_name(
-                    actions, "action"
-                ):
-                    if (
-                        action.getAttribute("name") == "on"
-                        and
-                        action.getAttribute("on_target") == "1"
-                        and
-                        action.getAttribute("automatic") == "1"
-                    ):
-                        return True
-    except xml.parsers.expat.ExpatError as e:
-        return False
+def stonith_does_agent_provide_unfencing(metadata_dom):
+    for action in lib_ra.get_agent_actions(metadata_dom):
+        if (
+            action["name"] == "on" and
+            "on_target" in action and
+            action["on_target"] == "1" and
+            "automatic" in action and
+            action["automatic"] == "1"
+        ):
+            return True
     return False
 
+
+def get_fence_agent_info(argv):
+    if len(argv) != 1:
+        utils.err("One parameter expected")
+
+    agent = argv[0]
+    if not agent.startswith("stonith:"):
+        utils.err("Invalid fence agent name")
+
+    runner = utils.cmd_runner()
+
+    try:
+        metadata_dom = lib_ra.get_fence_agent_metadata(
+            runner,
+            agent.split("stonith:", 1)[1]
+        )
+        metadata = lib_ra.get_agent_desc(metadata_dom)
+        metadata["name"] = agent
+        metadata["parameters"] = lib_ra.get_fence_agent_parameters(
+            runner, metadata_dom
+        )
+
+        print(json.dumps(metadata))
+    except lib_ra.LibraryError as e:
+        utils.process_library_reports(e.args)
diff --git a/pcs/test/Makefile b/pcs/test/Makefile
deleted file mode 100644
index 34334d2..0000000
--- a/pcs/test/Makefile
+++ /dev/null
@@ -1,16 +0,0 @@
-ifndef $(PYTHON)
-	PYTHON = python
-endif
-
-test:
-	$(PYTHON) test_utils.py ${pyunit_flags}
-	$(PYTHON) test_corosync_conf.py ${pyunit_flags}
-	$(PYTHON) test_cluster.py ${pyunit_flags}
-	$(PYTHON) test_resource.py ${pyunit_flags}
-	$(PYTHON) test_rule.py ${pyunit_flags}
-	$(PYTHON) test_constraints.py ${pyunit_flags}
-	$(PYTHON) test_stonith.py ${pyunit_flags}
-	$(PYTHON) test_properties.py ${pyunit_flags}
-	$(PYTHON) test_acl.py ${pyunit_flags}
-	$(PYTHON) test_node.py ${pyunit_flags}
-	$(PYTHON) test_library_acl.py ${pyunit_flags}
diff --git a/pcs/test/__init__.py b/pcs/test/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/test/a.xml b/pcs/test/a.xml
deleted file mode 100644
index 194d08c..0000000
--- a/pcs/test/a.xml
+++ /dev/null
@@ -1,13 +0,0 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.6" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
-  <configuration>
-    <crm_config/>
-    <nodes>
-      <node id="1" type="normal" uname="rh7-1"/>
-      <node id="2" type="normal" uname="rh7-2"/>
-      <node id="3" type="normal" uname="rh7-3"/>
-    </nodes>
-    <resources/>
-    <constraints/>
-  </configuration>
-  <status/>
-</cib>
\ No newline at end of file
diff --git a/pcs/test/blank.xml b/pcs/test/blank.xml
deleted file mode 100644
index 194d08c..0000000
--- a/pcs/test/blank.xml
+++ /dev/null
@@ -1,13 +0,0 @@
-<cib epoch="557" num_updates="122" admin_epoch="0" validate-with="pacemaker-1.2" crm_feature_set="3.0.6" update-origin="rh7-3" update-client="crmd" cib-last-written="Thu Aug 23 16:49:17 2012" have-quorum="0" dc-uuid="2">
-  <configuration>
-    <crm_config/>
-    <nodes>
-      <node id="1" type="normal" uname="rh7-1"/>
-      <node id="2" type="normal" uname="rh7-2"/>
-      <node id="3" type="normal" uname="rh7-3"/>
-    </nodes>
-    <resources/>
-    <constraints/>
-  </configuration>
-  <status/>
-</cib>
\ No newline at end of file
diff --git a/pcs/test/final.xml b/pcs/test/final.xml
deleted file mode 100644
index 79ad1d1..0000000
--- a/pcs/test/final.xml
+++ /dev/null
@@ -1,117 +0,0 @@
-  <configuration>
-    <crm_config/>
-    <nodes>
-      <node id="1" type="normal" uname="rh7-1"/>
-      <node id="2" type="normal" uname="rh7-2"/>
-      <node id="3" type="normal" uname="rh7-3"/>
-    </nodes>
-    <resources>
-      <primitive class="ocf" id="ClusterIP" provider="heartbeat" type="IPaddr2">
-        <instance_attributes id="ClusterIP-instance_attributes">
-          <nvpair id="ClusterIP-instance_attributes-ip" name="ip" value="192.168.0.99"/>
-          <nvpair id="ClusterIP-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
-        </instance_attributes>
-        <operations>
-          <op id="ClusterIP-interval-30s" interval="30s" name="monitor"/>
-        </operations>
-      </primitive>
-      <primitive class="ocf" id="DummyRes" provider="heartbeat" type="Dummy">
-        <instance_attributes id="DummyRes-instance_attributes">
-          <nvpair id="DummyRes-instance_attributes-fake" name="fake" value="my fake"/>
-        </instance_attributes>
-      </primitive>
-      <group id="MyGroup">
-        <primitive class="ocf" id="ClusterIP2" provider="heartbeat" type="IPaddr2">
-          <instance_attributes id="ClusterIP2-instance_attributes">
-            <nvpair id="ClusterIP2-instance_attributes-ip" name="ip" value="192.168.0.100"/>
-            <nvpair id="ClusterIP2-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
-          </instance_attributes>
-          <operations>
-            <op id="ClusterIP2-interval-30s" interval="30s" name="monitor"/>
-            <op id="ClusterIP2-name-monitor-interval-31s" interval="31s" name="monitor"/>
-          </operations>
-        </primitive>
-        <primitive class="ocf" id="ClusterIP32" provider="heartbeat" type="IPaddr2">
-          <instance_attributes id="ClusterIP32-instance_attributes">
-            <nvpair id="ClusterIP32-instance_attributes-ip" name="ip" value="192.168.0.99"/>
-            <nvpair id="ClusterIP32-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
-          </instance_attributes>
-          <operations>
-            <op id="ClusterIP32-interval-30s" interval="30s" name="monitor"/>
-          </operations>
-        </primitive>
-      </group>
-      <clone id="ClusterIP4-clone">
-        <primitive class="ocf" id="ClusterIP4" provider="heartbeat" type="IPaddr2">
-          <instance_attributes id="ClusterIP4-instance_attributes">
-            <nvpair id="ClusterIP4-instance_attributes-ip" name="ip" value="192.168.0.99"/>
-            <nvpair id="ClusterIP4-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
-          </instance_attributes>
-          <operations>
-            <op id="ClusterIP4-interval-30s" interval="30s" name="monitor"/>
-          </operations>
-        </primitive>
-        <meta_attributes id="ClusterIP4-clone-meta">
-          <nvpair id="ClusterIP4-globally-unique" name="globally-unique" value="true"/>
-        </meta_attributes>
-      </clone>
-      <primitive class="ocf" id="ClusterIP7" provider="heartbeat" type="IPaddr2">
-        <instance_attributes id="ClusterIP7-instance_attributes">
-          <nvpair id="ClusterIP7-instance_attributes-ip" name="ip" value="192.168.0.99"/>
-          <nvpair id="ClusterIP7-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
-        </instance_attributes>
-        <operations>
-          <op id="ClusterIP7-interval-30s" interval="30s" name="monitor"/>
-        </operations>
-      </primitive>
-      <clone id="ClusterIP8-clone">
-        <primitive class="ocf" id="ClusterIP8" provider="heartbeat" type="IPaddr2">
-          <instance_attributes id="ClusterIP8-instance_attributes">
-            <nvpair id="ClusterIP8-instance_attributes-ip" name="ip" value="192.168.0.99"/>
-            <nvpair id="ClusterIP8-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
-          </instance_attributes>
-          <operations>
-            <op id="ClusterIP8-interval-30s" interval="30s" name="monitor"/>
-          </operations>
-        </primitive>
-        <meta_attributes id="ClusterIP8-clone-meta"/>
-      </clone>
-      <master id="ClusterIP9-master">
-        <primitive class="ocf" id="ClusterIP9" provider="heartbeat" type="IPaddr2">
-          <instance_attributes id="ClusterIP9-instance_attributes">
-            <nvpair id="ClusterIP9-instance_attributes-ip" name="ip" value="192.168.0.99"/>
-            <nvpair id="ClusterIP9-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
-          </instance_attributes>
-          <operations>
-            <op id="ClusterIP9-interval-30s" interval="30s" name="monitor"/>
-          </operations>
-        </primitive>
-      </master>
-      <primitive class="ocf" id="ClusterIP81" provider="heartbeat" type="IPaddr2">
-        <instance_attributes id="ClusterIP81-instance_attributes">
-          <nvpair id="ClusterIP81-instance_attributes-ip" name="ip" value="192.168.0.99"/>
-          <nvpair id="ClusterIP81-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
-        </instance_attributes>
-        <operations>
-          <op id="ClusterIP81-interval-30s" interval="30s" name="monitor"/>
-        </operations>
-      </primitive>
-      <master id="MyMaster">
-        <primitive class="ocf" id="ClusterIP5" provider="heartbeat" type="IPaddr2">
-          <instance_attributes id="ClusterIP5-instance_attributes">
-            <nvpair id="ClusterIP5-instance_attributes-ip" name="ip" value="192.168.0.99"/>
-            <nvpair id="ClusterIP5-instance_attributes-cidr_netmask" name="cidr_netmask" value="32"/>
-          </instance_attributes>
-          <operations>
-            <op id="ClusterIP5-interval-30s" interval="30s" name="monitor"/>
-          </operations>
-        </primitive>
-        <meta_attributes id="MyMaster-meta_attributes">
-          <nvpair id="MyMaster-meta_attributes-master-max" name="master-max" value="2"/>
-        </meta_attributes>
-      </master>
-    </resources>
-    <constraints/>
-  </configuration>
-  <status/>
-</cib>
\ No newline at end of file
diff --git a/pcs/test/library_test_tools.py b/pcs/test/library_test_tools.py
deleted file mode 100644
index 9b6bcfc..0000000
--- a/pcs/test/library_test_tools.py
+++ /dev/null
@@ -1,93 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import xml.dom.minidom
-from lxml.doctestcompare import LXMLOutputChecker
-from doctest import Example
-
-from library_acl import LibraryError
-
-class LibraryAssertionMixin(object):
-    def __find_report_info(self, report_info_list, report_item):
-        for report_info in report_info_list:
-            if(
-                report_item.severity == report_info[0]
-                and
-                report_item.code == report_info[1]
-                and
-                #checks only presence and match of expected in info,
-                #extra info is ignored
-                all(
-                    (k in report_item.info and report_item.info[k]==v)
-                    for k,v in report_info[2].iteritems()
-                )
-            ):
-                return report_info
-        raise AssertionError(
-            'Unexpected report given: {0}'
-            .format(repr((
-                report_item.severity, report_item.code, repr(report_item.info)
-            )))
-        )
-
-    def __check_error(self, e, report_info_list):
-        for report_item in e.args:
-            report_info_list.remove(
-                self.__find_report_info(report_info_list, report_item)
-            )
-
-        if report_info_list:
-            raise AssertionError(
-                'In the report from LibraryError was not present: '
-                +', '+repr(report_info_list)
-            )
-
-    def assert_raise_library_error(self, callableObj, *report_info_list):
-        if not report_info_list:
-            raise AssertionError(
-                'Raising LibraryError expected, but no report item specified.'
-                +' Please specify report items, that you expect in LibraryError'
-            )
-
-        try:
-            callableObj()
-            raise AssertionError('LibraryError not raised')
-        except LibraryError as e:
-            self.__check_error(e, list(report_info_list))
-
-    def assert_cib_equal(self, expected_cib, got_cib=None):
-        got_cib = got_cib if got_cib else self.cib
-        got_xml = got_cib.dom.toxml()
-        expected_xml = expected_cib.dom.toxml()
-
-        checker = LXMLOutputChecker()
-        if checker.check_output(expected_xml, got_xml, 0):
-            return
-
-        raise AssertionError(checker.output_difference(
-            Example("", expected_xml),
-            got_xml,
-            0
-        ))
-
-class CibManipulation(object):
-    def __init__(self, file_name):
-        self.dom = xml.dom.minidom.parse(file_name)
-
-    def __append_to_child(self, element, xml_string):
-        element.appendChild(
-            xml.dom.minidom.parseString(xml_string).firstChild
-        )
-
-    def append_to_first_tag_name(self, tag_name, xml_string):
-        self.__append_to_child(
-            self.dom.getElementsByTagName(tag_name)[0], xml_string
-        )
-        return self
-
-def get_cib_manipulation_creator(file_name):
-    def create_cib_manipulation():
-       return CibManipulation(file_name)
-    return create_cib_manipulation
diff --git a/pcs/test/pcs_test_assertions.py b/pcs/test/pcs_test_assertions.py
deleted file mode 100644
index 8e6fbc9..0000000
--- a/pcs/test/pcs_test_assertions.py
+++ /dev/null
@@ -1,75 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import difflib
-
-def prepare_diff(first, second):
-    return ''.join(
-        difflib.Differ().compare(first.splitlines(1), second.splitlines(1))
-    )
-
-
-class AssertPcsMixin(object):
-    def assert_pcs_success(self, command, stdout_full=None, stdout_start=None):
-        full = stdout_full
-        if stdout_start is None and stdout_full is None:
-            full = ''
-
-        self.assert_pcs_result(
-            command,
-            stdout_full=full,
-            stdout_start=stdout_start
-        )
-
-    def assert_pcs_fail(self, command, stdout_full=None, stdout_start=None):
-        self.assert_pcs_result(
-            command,
-            stdout_full=stdout_full,
-            stdout_start=stdout_start,
-            returncode=1
-        )
-
-    def assert_pcs_result(
-        self, command, stdout_full=None, stdout_start=None, returncode=0
-    ):
-        msg = 'Please specify exactly one: stdout_start or stdout_full'
-        if stdout_start is None and stdout_full is None:
-            raise Exception(msg +', none specified')
-
-        if stdout_start is not None and stdout_full is not None:
-            raise Exception(msg +', both specified')
-
-        stdout, pcs_returncode = self.pcs_runner.run(command)
-        self.assertEqual(
-            returncode, pcs_returncode, (
-                'Expected return code "{0}", but was "{1}"'
-                +'\ncommand:\n{2}\nstdout:\n{3}'
-            ).format(returncode, pcs_returncode, command, stdout)
-        )
-        if stdout_start:
-            expected_start = '\n'.join(stdout_start)+'\n' \
-                if isinstance(stdout_start, list) else stdout_start
-
-            if not stdout.startswith(expected_start):
-                self.assertTrue(
-                    False,
-                    'Stdout not start as expected\ncommand:\n'+command
-                    +'\ndiff is (expected 2nd):\n'
-                    +prepare_diff(stdout[:len(expected_start)], expected_start)
-                    +'\nFull stdout:'+stdout
-                )
-        else:
-            expected_full = '\n'.join(stdout_full)+'\n' \
-                if isinstance(stdout_full, list) else stdout_full
-
-            #unicode vs non-unicode not solved here
-            if stdout != expected_full:
-                self.assertEqual(
-                    stdout, expected_full,
-                    'Stdout is not as expected\ncommand:\n'+command
-                    +'\n diff is(expected 2nd):\n'
-                    +prepare_diff(stdout, expected_full)
-                    +'\nFull stdout:'+stdout
-                )
diff --git a/pcs/test/pcs_test_functions.py b/pcs/test/pcs_test_functions.py
deleted file mode 100644
index 3e84455..0000000
--- a/pcs/test/pcs_test_functions.py
+++ /dev/null
@@ -1,84 +0,0 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import os.path
-import sys
-import difflib
-import subprocess
-import re
-import xml.dom.minidom
-parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0,parentdir)
-
-import utils
-
-
-pcs_location = "../pcs.py"
-
-class PcsRunner(object):
-    testfile = 'temp.xml'
-    def __init__(self, testfile='temp.xml'):
-        self.testfile = testfile
-
-
-    def run(self, args):
-        return pcs(self.testfile, args)
-
-
-def pcs(testfile, args = ""):
-    """Run pcs with -f on specified file
-    Return tuple with:
-        shell stdoutdata
-        shell returncode
-    """
-    if args == "":
-        args = testfile
-        testfile = "temp.xml"
-    arg_split = args.split()
-    arg_split_temp = []
-    in_quote = False
-    for arg in arg_split:
-        if in_quote:
-            arg_split_temp[-1] = arg_split_temp[-1] + " " + arg.replace("'","")
-            if arg.find("'") != -1:
-                in_quote = False
-        else:
-            arg_split_temp.append(arg.replace("'",""))
-            if arg.find("'") != -1 and not (arg[0] == "'" and arg[-1] == "'"):
-                in_quote = True
-
-    conf_opts = []
-    if "--corosync_conf" not in args:
-        conf_opts.append("--corosync_conf=corosync.conf")
-    if "--cluster_conf" not in args:
-        conf_opts.append("--cluster_conf=cluster.conf")
-    return utils.run([pcs_location, "-f", testfile] + conf_opts + arg_split_temp)
-
-# Compare output and print usable diff (diff b a)
-# a is the actual output, b is what should be output
-def ac(a,b):
-    if a != b:
-        d = difflib.Differ()
-        diff = d.compare(b.splitlines(1),a.splitlines(1))
-        print("")
-        print("".join(diff))
-        assert False,[a]
-
-def isMinimumPacemakerVersion(cmajor,cminor,crev):
-    output, retval = utils.run(["crm_mon", "--version"])
-    pacemaker_version = output.split("\n")[0]
-    r = re.compile(r"Pacemaker (\d+)\.(\d+)\.(\d+)")
-    m = r.match(pacemaker_version)
-    major = int(m.group(1))
-    minor = int(m.group(2))
-    rev = int(m.group(3))
-
-    if major > cmajor or (major == cmajor and minor > cminor) or (major == cmajor and minor == cminor and rev >= crev):
-        return True
-    return False
-
-
-def get_child_elements(el):
-    return [e for e in el.childNodes if e.nodeType == xml.dom.minidom.Node.ELEMENT_NODE]
diff --git a/pcs/test/.gitignore b/pcs/test/resources/.gitignore
similarity index 55%
rename from pcs/test/.gitignore
rename to pcs/test/resources/.gitignore
index 2d5046c..8c710cf 100644
--- a/pcs/test/.gitignore
+++ b/pcs/test/resources/.gitignore
@@ -1,3 +1,2 @@
 *.tmp
 temp*.xml
-temp.xml-old
diff --git a/pcs/test/empty-1.2.xml b/pcs/test/resources/cib-empty-1.2.xml
similarity index 100%
rename from pcs/test/empty-1.2.xml
rename to pcs/test/resources/cib-empty-1.2.xml
diff --git a/pcs/test/empty-withnodes.xml b/pcs/test/resources/cib-empty-withnodes.xml
similarity index 100%
rename from pcs/test/empty-withnodes.xml
rename to pcs/test/resources/cib-empty-withnodes.xml
diff --git a/pcs/test/empty.xml b/pcs/test/resources/cib-empty.xml
similarity index 100%
rename from pcs/test/empty.xml
rename to pcs/test/resources/cib-empty.xml
diff --git a/pcs/test/large.xml b/pcs/test/resources/cib-large.xml
similarity index 100%
rename from pcs/test/large.xml
rename to pcs/test/resources/cib-large.xml
diff --git a/pcs/test/largefile.xml b/pcs/test/resources/cib-largefile.xml
similarity index 100%
rename from pcs/test/largefile.xml
rename to pcs/test/resources/cib-largefile.xml
diff --git a/pcs/test/cluster.conf b/pcs/test/resources/cluster.conf
similarity index 100%
rename from pcs/test/cluster.conf
rename to pcs/test/resources/cluster.conf
diff --git a/pcs/test/corosync.conf b/pcs/test/resources/corosync-3nodes-qdevice.conf
similarity index 66%
copy from pcs/test/corosync.conf
copy to pcs/test/resources/corosync-3nodes-qdevice.conf
index 636d716..fb249bf 100644
--- a/pcs/test/corosync.conf
+++ b/pcs/test/resources/corosync-3nodes-qdevice.conf
@@ -15,10 +15,23 @@ nodelist {
         ring0_addr: rh7-2
         nodeid: 2
     }
+
+    node {
+        ring0_addr: rh7-3
+        nodeid: 3
+    }
 }
 
 quorum {
     provider: corosync_votequorum
+
+    device {
+        model: net
+
+        net {
+            host: 127.0.0.1
+        }
+    }
 }
 
 logging {
diff --git a/pcs/test/corosync.conf.orig b/pcs/test/resources/corosync-3nodes.conf
similarity index 82%
rename from pcs/test/corosync.conf.orig
rename to pcs/test/resources/corosync-3nodes.conf
index 636d716..510f46d 100644
--- a/pcs/test/corosync.conf.orig
+++ b/pcs/test/resources/corosync-3nodes.conf
@@ -15,6 +15,11 @@ nodelist {
         ring0_addr: rh7-2
         nodeid: 2
     }
+
+    node {
+        ring0_addr: rh7-3
+        nodeid: 3
+    }
 }
 
 quorum {
diff --git a/pcs/test/corosync.conf b/pcs/test/resources/corosync.conf
similarity index 94%
rename from pcs/test/corosync.conf
rename to pcs/test/resources/corosync.conf
index 636d716..9daa9e5 100644
--- a/pcs/test/corosync.conf
+++ b/pcs/test/resources/corosync.conf
@@ -19,6 +19,7 @@ nodelist {
 
 quorum {
     provider: corosync_votequorum
+    two_node: 1
 }
 
 logging {
diff --git a/pcs/test/resources/crm_mon.minimal.xml b/pcs/test/resources/crm_mon.minimal.xml
new file mode 100644
index 0000000..11c530b
--- /dev/null
+++ b/pcs/test/resources/crm_mon.minimal.xml
@@ -0,0 +1,10 @@
+<?xml version="1.0"?>
+<crm_mon version="1.1.13">
+  <summary>
+      <current_dc present="false"/>
+      <nodes_configured number="0" expected_votes="unknown" />
+      <resources_configured number="0" />
+  </summary>
+  <nodes>
+  </nodes>
+</crm_mon>
diff --git a/pcs/test/transitions01.xml b/pcs/test/resources/transitions01.xml
similarity index 100%
rename from pcs/test/transitions01.xml
rename to pcs/test/resources/transitions01.xml
diff --git a/pcs/test/transitions02.xml b/pcs/test/resources/transitions02.xml
similarity index 100%
rename from pcs/test/transitions02.xml
rename to pcs/test/resources/transitions02.xml
diff --git a/pcs/test/suite.py b/pcs/test/suite.py
new file mode 100755
index 0000000..696c699
--- /dev/null
+++ b/pcs/test/suite.py
@@ -0,0 +1,87 @@
+#!/usr/bin/env python
+
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+import sys
+import os.path
+
+major, minor = sys.version_info[:2]
+if major == 2 and minor == 6:
+    import unittest2 as unittest
+else:
+    import unittest
+
+
+PACKAGE_DIR = os.path.dirname(os.path.dirname(os.path.dirname(
+    os.path.abspath(__file__)
+)))
+
+def put_package_to_path():
+    sys.path.insert(0, PACKAGE_DIR)
+
+def prepare_test_name(test_name):
+    """
+    Sometimes a test is most easily referenced with a fs path format like:
+    "pcs/test/test_node"
+    but the loader needs it in module path format like:
+    "pcs.test.test_node"
+    so it is practical to accept the fs path format and prepare it for the loader
+    """
+    return test_name.replace("/", ".")
+
+def discover_tests(test_name_list):
+    loader = unittest.TestLoader()
+    if test_name_list:
+        return loader.loadTestsFromNames(map(prepare_test_name, test_name_list))
+    return loader.discover(PACKAGE_DIR, pattern='test_*.py')
+
+def run_tests(tests, verbose=False, color=False):
+    resultclass = unittest.runner.TextTestResult
+    if color:
+        from pcs.test.tools.color_text_runner import ColorTextTestResult
+        resultclass = ColorTextTestResult
+
+    testRunner = unittest.runner.TextTestRunner(
+        verbosity=2 if verbose else 1,
+        resultclass=resultclass
+    )
+    testRunner.run(tests)
+
+put_package_to_path()
+tests = discover_tests([
+    arg for arg in sys.argv[1:] if arg not in ("-v", "--color", "--no-color")
+])
+run_tests(
+    tests,
+    verbose="-v" in sys.argv,
+    color=(
+        "--color" in sys.argv
+        or
+        (
+            sys.stdout.isatty()
+            and
+            sys.stderr.isatty()
+            and "--no-color" not in sys.argv
+        )
+    ),
+)
+
+# assume that we are in pcs root dir
+#
+# run all tests:
+# ./pcs/test/suite.py
+#
+# run with the name of each executed test printed:
+# pcs/test/suite.py -v
+#
+# run specific test:
+# IMPORTANT: in 2.6 module.class.method doesn't work but module.class works fine
+# pcs/test/suite.py test_acl.ACLTest -v
+# pcs/test/suite.py test_acl.ACLTest.testAutoUpgradeofCIB
+#
+# for colored test report
+# pcs/test/suite.py --color
diff --git a/pcs/test/test.py b/pcs/test/test.py
deleted file mode 100644
index 0e9b980..0000000
--- a/pcs/test/test.py
+++ /dev/null
@@ -1,23 +0,0 @@
-import xml.etree.ElementTree as ET
-
-
-tree = ET.parse('temp.xml')
-root = tree.getroot()
-print(type(tree))
-print(type(root))
-
-if type(tree) == ET.ElementTree:
-  print("ELEMENT")
-else:
-  print("FAIL")
-
-check_id = "D4"
-print(root.find(".//primitive[id=D1]"))
-print(root.find(".//primitive[@id='"+check_id+"']"))
-
-#for z in root.findall(".//*"):
-#  print z
-
-#for a in root.findall('*'):
-#  print a
-
diff --git a/pcs/test/test.sh b/pcs/test/test.sh
deleted file mode 100755
index ac94992..0000000
--- a/pcs/test/test.sh
+++ /dev/null
@@ -1,45 +0,0 @@
-rm test.xml
-cp blank.xml test.xml
-echo "Testing Resources..."
-../pcs.py -f test.xml resource create ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s
-../pcs.py -f test.xml resource create DummyRes ocf:heartbeat:Dummy fake="my fake"
-../pcs.py -f test.xml resource create ClusterIP2 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s
-../pcs.py -f test.xml resource create ClusterIP3 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s
-../pcs.py -f test.xml resource create ClusterIP3 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s
-../pcs.py -f test.xml resource create ClusterIP4 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s
-../pcs.py -f test.xml resource show
-../pcs.py -f test.xml resource delete ClusterIP3
-../pcs.py -f test.xml resource show
-../pcs.py -f test.xml resource create ClusterIP32 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s
-../pcs.py -f test.xml resource show
-../pcs.py -f test.xml resource show ClusterIP2
-../pcs.py -f test.xml resource update ClusterIP2 ip=192.168.0.100
-../pcs.py -f test.xml resource add_operation ClusterIP2 monitor interval="31s"
-../pcs.py -f test.xml resource add_operation ClusterIP2 monitor interval="32s"
-../pcs.py -f test.xml resource remove_operation ClusterIP2 monitor interval="32s"
-../pcs.py -f test.xml resource show ClusterIP2
-../pcs.py -f test.xml resource group add MyGroup ClusterIP2 ClusterIP32 ClusterIP4
-../pcs.py -f test.xml resource show
-../pcs.py -f test.xml resource group remove_resource MyGroup ClusterIP4
-../pcs.py -f test.xml resource show
-../pcs.py -f test.xml resource group add MyGroup2 ClusterIP4
-../pcs.py -f test.xml resource show
-../pcs.py -f test.xml resource group remove_resource MyGroup2 ClusterIP4
-../pcs.py -f test.xml resource show
-../pcs.py -f test.xml resource clone create ClusterIP4 globally-unique=false
-../pcs.py -f test.xml resource show
-../pcs.py -f test.xml resource show ClusterIP4-clone
-../pcs.py -f test.xml resource clone update ClusterIP4 globally-unique=true
-../pcs.py -f test.xml resource show ClusterIP4-clone
-../pcs.py -f test.xml resource create ClusterIP5 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s
-../pcs.py -f test.xml resource create ClusterIP6 ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s
-../pcs.py -f test.xml resource master create MyMaster ClusterIP5 master-max=1
-../pcs.py -f test.xml resource master update MyMaster master-max=2
-../pcs.py -f test.xml resource master create MyMaster2 ClusterIP6
-../pcs.py -f test.xml resource show
-../pcs.py -f test.xml resource show MyMaster
-../pcs.py -f test.xml resource master remove MyMaster2
-../pcs.py -f test.xml resource show
-diff test.xml final.xml
-
-
diff --git a/pcs/test/test_acl.py b/pcs/test/test_acl.py
index 9227f42..b053614 100644
--- a/pcs/test/test_acl.py
+++ b/pcs/test/test_acl.py
@@ -1,43 +1,42 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
-import os.path
-import sys
 import shutil
 import unittest
-parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0,parentdir)
 
-import utils
-from pcs_test_functions import pcs, ac, isMinimumPacemakerVersion
-from pcs_test_functions import PcsRunner
-from pcs_test_assertions import AssertPcsMixin
+from pcs.test.tools.assertions import AssertPcsMixin
+from pcs.test.tools.misc import (
+    ac,
+    get_test_resource as rc,
+)
+from pcs.test.tools.pcs_runner import (
+    pcs,
+    PcsRunner,
+)
 
-
-old_cib = "empty.xml"
-empty_cib = "empty-1.2.xml"
-temp_cib = "temp.xml"
+old_cib = rc("cib-empty.xml")
+empty_cib = rc("cib-empty-1.2.xml")
+temp_cib = rc("temp-cib.xml")
 
 class ACLTest(unittest.TestCase, AssertPcsMixin):
     pcs_runner = None
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
-        shutil.copy("corosync.conf.orig", "corosync.conf")
         self.pcs_runner = PcsRunner(temp_cib)
 
     def testAutoUpgradeofCIB(self):
-        old_temp_cib = temp_cib + "-old"
-        shutil.copy(old_cib, old_temp_cib)
-        self.pcs_runner.testfile = old_temp_cib
+        shutil.copy(old_cib, temp_cib)
 
         self.assert_pcs_success(
             'acl show',
             "ACLs are disabled, run 'pcs acl enable' to enable\n\n"
         )
 
-        with open(old_temp_cib) as myfile:
+        with open(temp_cib) as myfile:
             data = myfile.read()
             assert data.find("pacemaker-1.2") != -1
             assert data.find("pacemaker-2.") == -1
@@ -47,7 +46,7 @@ class ACLTest(unittest.TestCase, AssertPcsMixin):
             "Cluster CIB has been upgraded to latest version\n"
         )
 
-        with open(old_temp_cib) as myfile:
+        with open(temp_cib) as myfile:
             data = myfile.read()
             assert data.find("pacemaker-1.2") == -1
             assert data.find("pacemaker-2.") != -1
@@ -136,15 +135,15 @@ Role: role3
 
         o,r = pcs("acl role create user1")
         assert r == 1
-        ac(o,"Error: user1 already exists\n")
+        ac(o,"Error: 'user1' already exists\n")
 
         o,r = pcs("acl role create group1")
         assert r == 1
-        ac(o,"Error: group1 already exists\n")
+        ac(o,"Error: 'group1' already exists\n")
 
         o,r = pcs("acl role create role1")
         assert r == 1
-        ac(o,"Error: role role1 already exists\n")
+        ac(o,"Error: 'role1' already exists\n")
 
         o,r = pcs("acl user create user1")
         assert r == 1
@@ -559,7 +558,7 @@ User: user2
         assert r == 0
 
         o,r = pcs("acl role create role0")
-        ac(o,"Error: role role0 already exists\n")
+        ac(o,"Error: 'role0' already exists\n")
         assert r == 1
 
         o,r = pcs("acl role create role0d description='empty role'")
@@ -771,16 +770,16 @@ Role: role4
     def test_can_not_add_permission_for_nonexisting_id(self):
         self.assert_pcs_success('acl role create role1')
         self.assert_pcs_fail(
-            'acl permission add role1 read id non-existent-id',
-            'Error: id "non-existent-id" does not exist.\n'
+            "acl permission add role1 read id non-existent-id",
+            "Error: id 'non-existent-id' does not exist\n"
         )
 
     def test_can_not_add_permission_for_nonexisting_id_in_later_part(self):
         self.assert_pcs_success('acl role create role1')
         self.assert_pcs_success('acl role create role2')
         self.assert_pcs_fail(
-            'acl permission add role1 read id role2 read id no-existent-id',
-            'Error: id "no-existent-id" does not exist.\n'
+            "acl permission add role1 read id role2 read id non-existent-id",
+            "Error: id 'non-existent-id' does not exist\n"
         )
 
     def test_can_not_add_permission_for_nonexisting_role_with_bad_id(self):
@@ -797,8 +796,8 @@ Role: role4
 
     def test_can_not_crate_role_with_permission_for_nonexisting_id(self):
         self.assert_pcs_fail(
-            'acl role create role1 read id non-existent-id',
-            'Error: id "non-existent-id" does not exist.\n'
+            "acl role create role1 read id non-existent-id",
+            "Error: id 'non-existent-id' does not exist\n"
         )
 
     def test_can_not_create_role_with_bad_name(self):
@@ -813,9 +812,3 @@ Role: role4
             'acl role unknown whatever',
             stdout_start="\nUsage: pcs acl role..."
         )
-
-if __name__ == "__main__":
-    if isMinimumPacemakerVersion(1,1,11):
-        unittest.main()
-    else:
-        print("WARNING: Pacemaker version is too old (must be >= 1.1.11) to test acls")
diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
index 8ec303d..2c3e71b 100644
--- a/pcs/test/test_cluster.py
+++ b/pcs/test/test_cluster.py
@@ -1,39 +1,46 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import os
-import sys
 import shutil
 import unittest
-parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0, parentdir)
 
-import utils
-from pcs_test_functions import pcs, ac, isMinimumPacemakerVersion
-from pcs_test_functions import PcsRunner
-from pcs_test_assertions import AssertPcsMixin
-
-
-empty_cib = "empty-withnodes.xml"
-temp_cib = "temp.xml"
+from pcs.test.tools.assertions import AssertPcsMixin
+from pcs.test.tools.misc import (
+    ac,
+    get_test_resource as rc,
+    is_minimum_pacemaker_version,
+)
+from pcs.test.tools.pcs_runner import (
+    pcs,
+    PcsRunner,
+)
+
+from pcs import utils
+
+empty_cib = rc("cib-empty-withnodes.xml")
+temp_cib = rc("temp-cib.xml")
+cluster_conf_file = rc("cluster.conf")
+cluster_conf_tmp = rc("cluster.conf.tmp")
+corosync_conf_tmp = rc("corosync.conf.tmp")
 
 class ClusterTest(unittest.TestCase, AssertPcsMixin):
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
-        self.pcs_runner = PcsRunner(temp_cib)
-        if os.path.exists("corosync.conf.tmp"):
-            os.unlink("corosync.conf.tmp")
-        if os.path.exists("cluster.conf.tmp"):
-            os.unlink("cluster.conf.tmp")
+        self.pcs_runner = PcsRunner(
+            temp_cib, corosync_conf_tmp, cluster_conf_tmp
+        )
+        if os.path.exists(corosync_conf_tmp):
+            os.unlink(corosync_conf_tmp)
+        if os.path.exists(cluster_conf_tmp):
+            os.unlink(cluster_conf_tmp)
 
     def testNodeStandby(self):
-        output, returnVal = pcs(temp_cib, "cluster standby rh7-1")
-        ac(output, "")
-        assert returnVal == 0
-
-        # try to standby node which is already in standby mode
+        # only basic test, standby subcommands were moved to 'pcs node'
         output, returnVal = pcs(temp_cib, "cluster standby rh7-1")
         ac(output, "")
         assert returnVal == 0
@@ -42,19 +49,6 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin):
         ac(output, "")
         assert returnVal == 0
 
-        # try to unstandby node which is no in standby mode
-        output, returnVal = pcs(temp_cib, "cluster unstandby rh7-1")
-        ac(output, "")
-        assert returnVal == 0
-
-        output, returnVal = pcs(temp_cib, "cluster standby nonexistant-node")
-        assert returnVal == 1
-        assert output == "Error: node 'nonexistant-node' does not appear to exist in configuration\n"
-
-        output, returnVal = pcs(temp_cib, "cluster unstandby nonexistant-node")
-        assert returnVal == 1
-        assert output == "Error: node 'nonexistant-node' does not appear to exist in configuration\n"
-
     def testRemoteNode(self):
         o,r = pcs(temp_cib, "resource create D1 Dummy --no-default-ops")
         assert r==0 and o==""
@@ -122,7 +116,8 @@ class ClusterTest(unittest.TestCase, AssertPcsMixin):
     def test_cluster_setup_hostnames_resolving(self):
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf=corosync.conf.tmp --cluster_conf=cluster.conf.tmp --name cname nonexistant-address"
+            "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address"
+            .format(corosync_conf_tmp, cluster_conf_tmp)
         )
         ac(output, """\
 Error: Unable to resolve all hostnames, use --force to override
@@ -132,7 +127,8 @@ Warning: Unable to resolve hostname: nonexistant-address
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf=corosync.conf.tmp --cluster_conf=cluster.conf.tmp --name cname nonexistant-address --force"
+            "cluster setup --local --corosync_conf={0} --cluster_conf={1} --name cname nonexistant-address --force"
+            .format(corosync_conf_tmp, cluster_conf_tmp)
         )
         ac(output, """\
 Warning: Unable to resolve hostname: nonexistant-address
@@ -145,7 +141,8 @@ Warning: Unable to resolve hostname: nonexistant-address
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
         self.assertEqual(0, returnVal)
@@ -180,31 +177,33 @@ logging {
     to_syslog: yes
 }
 """
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, corosync_conf)
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-2 rh7-3"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-2 rh7-3"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual("""\
-Error: corosync.conf.tmp already exists, use --force to overwrite
-""",
+Error: {0} already exists, use --force to overwrite
+""".format(corosync_conf_tmp),
             output
         )
         self.assertEqual(1, returnVal)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, corosync_conf)
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-2 rh7-3"
+            "cluster setup --force --local --corosync_conf={0} --name cname rh7-2 rh7-3"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
         self.assertEqual(0, returnVal)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -244,7 +243,8 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2"
+            .format(cluster_conf_tmp)
         )
         self.assertEqual("", output)
         self.assertEqual(0, returnVal)
@@ -277,31 +277,33 @@ logging {
   </rm>
 </cluster>
 """
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, cluster_conf)
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-2 rh7-3"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-2 rh7-3"
+            .format(cluster_conf_tmp)
         )
         self.assertEqual("""\
-Error: cluster.conf.tmp already exists, use --force to overwrite
-""",
+Error: {0} already exists, use --force to overwrite
+""".format(cluster_conf_tmp),
             output
         )
         self.assertEqual(1, returnVal)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, cluster_conf)
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-2 rh7-3"
+            "cluster setup --force --local --cluster_conf={0} --name cname rh7-2 rh7-3"
+            .format(cluster_conf_tmp)
         )
         self.assertEqual("", output)
         self.assertEqual(0, returnVal)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="9" name="cname">
@@ -342,11 +344,12 @@ Error: cluster.conf.tmp already exists, use --force to overwrite
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
         self.assertEqual(0, returnVal)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -382,11 +385,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode add --corosync_conf=corosync.conf.tmp rh7-3"
+            "cluster localnode add --corosync_conf={0} rh7-3"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual("rh7-3: successfully added!\n", output)
         self.assertEqual(0, returnVal)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -426,11 +430,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --corosync_conf=corosync.conf.tmp rh7-3"
+            "cluster localnode remove --corosync_conf={0} rh7-3"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual(0, returnVal)
         self.assertEqual("rh7-3: successfully removed!\n", output)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -466,11 +471,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode add --corosync_conf=corosync.conf.tmp rh7-3,192.168.1.3"
+            "cluster localnode add --corosync_conf={0} rh7-3,192.168.1.3"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual("rh7-3,192.168.1.3: successfully added!\n", output)
         self.assertEqual(0, returnVal)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -511,11 +517,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --corosync_conf=corosync.conf.tmp rh7-2"
+            "cluster localnode remove --corosync_conf={0} rh7-2"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual(0, returnVal)
         self.assertEqual("rh7-2: successfully removed!\n", output)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -552,11 +559,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --corosync_conf=corosync.conf.tmp rh7-3,192.168.1.3"
+            "cluster localnode remove --corosync_conf={0} rh7-3,192.168.1.3"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual(0, returnVal)
         self.assertEqual("rh7-3,192.168.1.3: successfully removed!\n", output)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -593,11 +601,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --auto_tie_breaker=1"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --auto_tie_breaker=1"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
         self.assertEqual(0, returnVal)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -633,11 +642,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode add --corosync_conf=corosync.conf.tmp rh7-3"
+            "cluster localnode add --corosync_conf={0} rh7-3"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual(output, "rh7-3: successfully added!\n")
         self.assertEqual(0, returnVal)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -678,11 +688,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --corosync_conf=corosync.conf.tmp rh7-3"
+            "cluster localnode remove --corosync_conf={0} rh7-3"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual("rh7-3: successfully removed!\n", output)
         self.assertEqual(0, returnVal)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -723,11 +734,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 rh7-3"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 rh7-3"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
         self.assertEqual(0, returnVal)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -772,11 +784,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --transport udp"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --transport udp"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
         self.assertEqual(0, returnVal)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -821,11 +834,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2"
+            .format(cluster_conf_tmp)
         )
         ac(output, "")
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="9" name="cname">
@@ -859,11 +873,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode add --cluster_conf=cluster.conf.tmp rh7-3"
+            "cluster localnode add --cluster_conf={0} rh7-3"
+            .format(cluster_conf_tmp)
         )
         ac(output, "rh7-3: successfully added!\n")
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="13" name="cname">
@@ -904,12 +919,13 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --cluster_conf=cluster.conf.tmp rh7-3"
+            "cluster localnode remove --cluster_conf={0} rh7-3"
+            .format(cluster_conf_tmp)
         )
         ac(output, "rh7-3: successfully removed!\n")
         self.assertEqual(returnVal, 0)
 
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="15" name="cname">
@@ -943,12 +959,13 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode add --cluster_conf=cluster.conf.tmp rh7-3,192.168.1.3"
+            "cluster localnode add --cluster_conf={0} rh7-3,192.168.1.3"
+            .format(cluster_conf_tmp)
         )
         ac(output, "rh7-3,192.168.1.3: successfully added!\n")
         self.assertEqual(returnVal, 0)
 
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="20" name="cname">
@@ -990,12 +1007,13 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --cluster_conf=cluster.conf.tmp rh7-2"
+            "cluster localnode remove --cluster_conf={0} rh7-2"
+            .format(cluster_conf_tmp)
         )
         ac(output, "rh7-2: successfully removed!\n")
         self.assertEqual(returnVal, 0)
 
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="22" name="cname">
@@ -1030,12 +1048,13 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster localnode remove --cluster_conf=cluster.conf.tmp rh7-3,192.168.1.3"
+            "cluster localnode remove --cluster_conf={0} rh7-3,192.168.1.3"
+            .format(cluster_conf_tmp)
         )
         ac(output, "rh7-3,192.168.1.3: successfully removed!\n")
         self.assertEqual(returnVal, 0)
 
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="23" name="cname">
@@ -1067,11 +1086,12 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 rh7-3"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 rh7-3"
+            .format(cluster_conf_tmp)
         )
         ac(output, "")
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="12" name="cname">
@@ -1117,13 +1137,14 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --transport udpu"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --transport udpu"
+            .format(cluster_conf_tmp)
         )
         ac(output, """\
 Warning: Using udpu transport on a CMAN cluster, cluster restart is required after node add or remove
 """)
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="9" name="cname">
@@ -1161,11 +1182,12 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --ipv6"
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --ipv6"
+            .format(corosync_conf_tmp)
         )
         self.assertEqual("", output)
         self.assertEqual(0, returnVal)
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -1206,13 +1228,14 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --ipv6"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --ipv6"
+            .format(cluster_conf_tmp)
         )
         ac(output, """\
 Warning: --ipv6 ignored as it is not supported on CMAN clusters
 """)
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="9" name="cname">
@@ -1248,18 +1271,30 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
         if utils.is_rhel6():
             return
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0"
+            .format(corosync_conf_tmp)
+        )
         assert r == 1
         ac(o, "Error: --addr0 can only be used once\n")
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
+            .format(corosync_conf_tmp)
+        )
         assert r == 1
-        ac("Error: blah is an unknown RRP mode, use --force to override\n", o)
+        ac(
+            o,
+            "Error: 'blah' is not a valid RRP mode value, use passive, active, use --force to override\n"
+        )
 
-        o,r = pcs("cluster setup --transport udp --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0")
+        o,r = pcs(
+            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0"
+            .format(corosync_conf_tmp)
+        )
         ac(o,"")
         assert r == 0
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -1312,10 +1347,13 @@ logging {
         if utils.is_rhel6():
             return
 
-        o,r = pcs("cluster setup --transport udp --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9")
+        o,r = pcs(
+            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
+            .format(corosync_conf_tmp)
+        )
         ac(o,"")
         assert r == 0
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -1368,10 +1406,13 @@ logging {
         if utils.is_rhel6():
             return
 
-        o,r = pcs("cluster setup --transport udp --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0")
+        o,r = pcs(
+            "cluster setup --transport udp --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
+            .format(corosync_conf_tmp)
+        )
         ac(o,"")
         assert r == 0
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -1424,10 +1465,13 @@ logging {
         if utils.is_rhel6():
             return
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
+            .format(corosync_conf_tmp)
+        )
         ac(o,"")
         assert r == 0
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -1482,14 +1526,20 @@ logging {
         if utils.is_rhel6():
             return
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+            .format(corosync_conf_tmp)
+        )
         ac(o, "Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp")
+        o,r = pcs(
+            "cluster setup --force --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+            .format(corosync_conf_tmp)
+        )
         ac(o, "Warning: using a RRP mode of 'active' is not supported or tested\n")
         assert r == 0
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -1542,14 +1592,20 @@ logging {
         if utils.is_rhel6():
             return
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
+            .format(corosync_conf_tmp)
+        )
         ac(o, "Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp")
+        o,r = pcs(
+            "cluster setup --force --local --corosync_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
+            .format(corosync_conf_tmp)
+        )
         ac(o, "Warning: using a RRP mode of 'active' is not supported or tested\n")
         assert r == 0
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -1601,11 +1657,17 @@ logging {
         if utils.is_rhel6():
             return
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3"
+            .format(corosync_conf_tmp)
+        )
         ac(o,"Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2,192.168.99.3\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2"
+            .format(corosync_conf_tmp)
+        )
         ac(o,"Error: if one node is configured for RRP, all nodes must be configured for RRP\n")
         assert r == 1
 
@@ -1613,10 +1675,13 @@ logging {
         ac(o,"Error: --addr0 and --addr1 can only be used with --transport=udp\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2"
+            .format(corosync_conf_tmp)
+        )
         ac(o,"")
         assert r == 0
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -1657,34 +1722,55 @@ logging {
         if utils.is_rhel6():
             return
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --wait_for_all=2")
-        ac(o, "Error: '2' is not a valid value for --wait_for_all, use 0 or 1\n")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2"
+            .format(corosync_conf_tmp)
+        )
+        ac(o, "Error: '2' is not a valid --wait_for_all value, use 0, 1\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --wait_for_all=2")
-        ac(o, "Error: '2' is not a valid value for --wait_for_all, use 0 or 1\n")
+        o,r = pcs(
+            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2"
+            .format(corosync_conf_tmp)
+        )
+        ac(o, "Error: '2' is not a valid --wait_for_all value, use 0, 1\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --auto_tie_breaker=2")
-        ac(o, "Error: '2' is not a valid value for --auto_tie_breaker, use 0 or 1\n")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --auto_tie_breaker=2"
+            .format(corosync_conf_tmp)
+        )
+        ac(o, "Error: '2' is not a valid --auto_tie_breaker value, use 0, 1\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --auto_tie_breaker=2")
-        ac(o, "Error: '2' is not a valid value for --auto_tie_breaker, use 0 or 1\n")
+        o,r = pcs(
+            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --auto_tie_breaker=2"
+            .format(corosync_conf_tmp)
+        )
+        ac(o, "Error: '2' is not a valid --auto_tie_breaker value, use 0, 1\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --last_man_standing=2")
-        ac(o, "Error: '2' is not a valid value for --last_man_standing, use 0 or 1\n")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --last_man_standing=2"
+            .format(corosync_conf_tmp)
+        )
+        ac(o, "Error: '2' is not a valid --last_man_standing value, use 0, 1\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --last_man_standing=2")
-        ac(o, "Error: '2' is not a valid value for --last_man_standing, use 0 or 1\n")
+        o,r = pcs(
+            "cluster setup --force --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --last_man_standing=2"
+            .format(corosync_conf_tmp)
+        )
+        ac(o, "Error: '2' is not a valid --last_man_standing value, use 0, 1\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --wait_for_all=1 --auto_tie_breaker=1 --last_man_standing=1 --last_man_standing_window=12000")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=1 --auto_tie_breaker=1 --last_man_standing=1 --last_man_standing_window=12000"
+            .format(corosync_conf_tmp)
+        )
         ac(o,"")
         assert r == 0
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -1734,22 +1820,24 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
+            .format(cluster_conf_tmp)
         )
         ac(output, """\
-Error: blah is an unknown RRP mode, use --force to override
-Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
+Error: 'blah' is not a valid RRP mode value, use passive, active, use --force to override
+Warning: Enabling broadcast for all rings as CMAN does not support broadcast in only one ring
 """)
         self.assertEqual(returnVal, 1)
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --transport udp --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0"
+            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0"
+            .format(cluster_conf_tmp)
         )
         ac(output, "")
         self.assertEqual(returnVal, 0)
 
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="14" name="cname">
@@ -1793,12 +1881,13 @@ Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in onl
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --transport udp --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
+            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
+            .format(cluster_conf_tmp)
         )
         ac(output, "")
         self.assertEqual(returnVal, 0)
 
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="14" name="cname">
@@ -1842,12 +1931,13 @@ Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in onl
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --transport udp --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
+            "cluster setup --transport udp --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
+            .format(cluster_conf_tmp)
         )
         ac(output, "")
         self.assertEqual(returnVal, 0)
 
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="14" name="cname">
@@ -1891,12 +1981,13 @@ Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in onl
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
+            .format(cluster_conf_tmp)
         )
         ac(output, "")
         self.assertEqual(returnVal, 0)
 
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="14" name="cname">
@@ -1940,7 +2031,8 @@ Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in onl
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+            .format(cluster_conf_tmp)
         )
         ac(
             output,
@@ -1950,14 +2042,15 @@ Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in onl
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+            "cluster setup --force --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+            .format(cluster_conf_tmp)
         )
         ac(
             output,
             "Warning: using a RRP mode of 'active' is not supported or tested\n"
         )
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="14" name="cname">
@@ -2001,24 +2094,26 @@ Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in onl
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
+            .format(cluster_conf_tmp)
         )
         ac(output, """\
 Error: using a RRP mode of 'active' is not supported or tested, use --force to override
-Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
+Warning: Enabling broadcast for all rings as CMAN does not support broadcast in only one ring
 """)
         self.assertEqual(returnVal, 1)
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
+            "cluster setup --force --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
+            .format(cluster_conf_tmp)
         )
         ac(output, """\
-Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
+Warning: Enabling broadcast for all rings as CMAN does not support broadcast in only one ring
 Warning: using a RRP mode of 'active' is not supported or tested
 """)
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="12" name="cname">
@@ -2059,7 +2154,8 @@ Warning: using a RRP mode of 'active' is not supported or tested
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3"
+            .format(cluster_conf_tmp)
         )
         ac(output, """\
 Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2,192.168.99.3
@@ -2077,7 +2173,7 @@ Error: if one node is configured for RRP, all nodes must be configured for RRP
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1 --transport=udpu"
+            "cluster setup --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1 --transport=udpu"
         )
         ac(output, """\
 Error: --addr0 and --addr1 can only be used with --transport=udp
@@ -2087,11 +2183,12 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2"
+            .format(cluster_conf_tmp)
         )
         ac(output, "")
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="12" name="cname">
@@ -2165,27 +2262,29 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode passive --broadcast0 --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode passive --broadcast0 --transport udp"
+            .format(cluster_conf_tmp)
         )
         ac(output, """\
-Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
+Warning: Enabling broadcast for all rings as CMAN does not support broadcast in only one ring
 """)
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, cluster_conf)
 
-        os.remove("cluster.conf.tmp")
+        os.remove(cluster_conf_tmp)
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --broadcast0 --transport udp"
+            "cluster setup --local --cluster_conf={0} --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --broadcast0 --transport udp"
+            .format(cluster_conf_tmp)
         )
         ac(output, """\
-Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
+Warning: Enabling broadcast for all rings as CMAN does not support broadcast in only one ring
 """)
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, cluster_conf)
 
@@ -2195,7 +2294,8 @@ Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in onl
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name test99 rh7-1 rh7-2 --wait_for_all=2 --auto_tie_breaker=3 --last_man_standing=4 --last_man_standing_window=5"
+            "cluster setup --local --cluster_conf={0} --name test99 rh7-1 rh7-2 --wait_for_all=2 --auto_tie_breaker=3 --last_man_standing=4 --last_man_standing_window=5"
+            .format(cluster_conf_tmp)
         )
         ac(output, """\
 Warning: --wait_for_all ignored as it is not supported on CMAN clusters
@@ -2204,7 +2304,7 @@ Warning: --last_man_standing ignored as it is not supported on CMAN clusters
 Warning: --last_man_standing_window ignored as it is not supported on CMAN clusters
 """)
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="9" name="test99">
@@ -2240,10 +2340,13 @@ Warning: --last_man_standing_window ignored as it is not supported on CMAN clust
         if utils.is_rhel6():
             return
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005")
+        o,r = pcs(
+            "cluster setup --local --corosync_conf={0} --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005"
+            .format(corosync_conf_tmp)
+        )
         ac(o,"")
         assert r == 0
-        with open("corosync.conf.tmp") as f:
+        with open(corosync_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 totem {
@@ -2289,12 +2392,14 @@ logging {
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005")
+            "cluster setup --local --cluster_conf={0} --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005"
+            .format(cluster_conf_tmp)
+        )
         ac(output, """\
 Warning: --token_coefficient ignored as it is not supported on CMAN clusters
 """)
         self.assertEqual(returnVal, 0)
-        with open("cluster.conf.tmp") as f:
+        with open(cluster_conf_tmp) as f:
             data = f.read()
             ac(data, """\
 <cluster config_version="10" name="test99">
@@ -2329,77 +2434,77 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
 
     def testUIDGID(self):
         if utils.is_rhel6():
-            os.system("cp cluster.conf cluster.conf.tmp")
+            os.system("cp {0} {1}".format(cluster_conf_file, cluster_conf_tmp))
 
-            o,r = pcs("cluster uidgid --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 0
             ac(o, "No uidgids configured in cluster.conf\n")
 
-            o,r = pcs("cluster uidgid blah --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid blah --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 1
             assert o.startswith("\nUsage:")
 
-            o,r = pcs("cluster uidgid rm --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid rm --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 1
             assert o.startswith("\nUsage:")
 
-            o,r = pcs("cluster uidgid add --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid add --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 1
             assert o.startswith("\nUsage:")
 
-            o,r = pcs("cluster uidgid add blah --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid add blah --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 1
             ac(o, "Error: uidgid options must be of the form uid=<uid> gid=<gid>\n")
 
-            o,r = pcs("cluster uidgid rm blah --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid rm blah --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 1
             ac(o, "Error: uidgid options must be of the form uid=<uid> gid=<gid>\n")
 
-            o,r = pcs("cluster uidgid add uid=zzz --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid add uid=zzz --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 0
             ac(o, "")
 
-            o,r = pcs("cluster uidgid add uid=zzz --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid add uid=zzz --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 1
             ac(o, "Error: unable to add uidgid\nError: uidgid entry already exists with uid=zzz, gid=\n")
 
-            o,r = pcs("cluster uidgid add gid=yyy --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid add gid=yyy --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 0
             ac(o, "")
 
-            o,r = pcs("cluster uidgid add uid=aaa gid=bbb --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid add uid=aaa gid=bbb --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 0
             ac(o, "")
 
-            o,r = pcs("cluster uidgid --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 0
             ac(o, "UID/GID: gid=, uid=zzz\nUID/GID: gid=yyy, uid=\nUID/GID: gid=bbb, uid=aaa\n")
 
-            o,r = pcs("cluster uidgid rm gid=bbb --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid rm gid=bbb --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 1
             ac(o, "Error: unable to remove uidgid\nError: unable to find uidgid with uid=, gid=bbb\n")
 
-            o,r = pcs("cluster uidgid rm uid=aaa gid=bbb --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid rm uid=aaa gid=bbb --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 0
             ac(o, "")
 
-            o,r = pcs("cluster uidgid --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 0
             ac(o, "UID/GID: gid=, uid=zzz\nUID/GID: gid=yyy, uid=\n")
 
-            o,r = pcs("cluster uidgid rm uid=zzz --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid rm uid=zzz --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 0
             ac(o, "")
 
-            o,r = pcs("config --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("config --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 0
             assert o.find("UID/GID: gid=yyy, uid=") != -1
 
-            o,r = pcs("cluster uidgid rm gid=yyy --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("cluster uidgid rm gid=yyy --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 0
             ac(o, "")
 
-            o,r = pcs("config --cluster_conf=cluster.conf.tmp")
+            o,r = pcs("config --cluster_conf={0}".format(cluster_conf_tmp))
             assert r == 0
             assert o.find("No uidgids") == -1
         else:
@@ -2452,7 +2557,7 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
             ac(o, "No uidgids configured in cluster.conf\n")
 
     def testClusterUpgrade(self):
-        if not isMinimumPacemakerVersion(1,1,11):
+        if not is_minimum_pacemaker_version(1, 1, 11):
             print("WARNING: Unable to test cluster upgrade because pacemaker is older than 1.1.11")
             return
         with open(temp_cib) as myfile:
@@ -2474,15 +2579,93 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
         assert r == 0
 
     def test_can_not_setup_cluster_for_unknown_transport_type(self):
+        if utils.is_rhel6():
+            return
+
         self.assert_pcs_fail(
-            'cluster setup --local --corosync_conf=corosync.conf.tmp'
-                +" --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2"
-                +" --transport=unknown"
-            ,
-            "Error: unknown transport 'unknown', use --force to override\n"
+            "cluster setup --local --name cname rh7-1 rh7-2 --transport=unknown",
+            "Error: 'unknown' is not a valid transport value, use udp, udpu, use --force to override\n"
         )
 
+        self.assert_pcs_success(
+            "cluster setup --local --name cname rh7-1 rh7-2 --transport=unknown --force",
+            "Warning: 'unknown' is not a valid transport value, use udp, udpu\n"
+        )
+        with open(corosync_conf_tmp) as f:
+            data = f.read()
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: unknown
+}
 
-if __name__ == "__main__":
-    unittest.main()
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
 
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
+
+    def test_can_not_setup_cluster_for_unknown_transport_type_rhel6(self):
+        if not utils.is_rhel6():
+            return
+
+        self.assert_pcs_fail(
+            "cluster setup --local --name cname rh7-1 rh7-2 --transport=rdma",
+            "Error: 'rdma' is not a valid transport value, use udp, udpu, use --force to override\n"
+        )
+
+        self.assert_pcs_success(
+            "cluster setup --local --name cname rh7-1 rh7-2 --transport=rdma --force",
+            "Warning: 'rdma' is not a valid transport value, use udp, udpu\n"
+        )
+        with open(cluster_conf_tmp) as f:
+            data = f.read()
+            ac(data, """\
+<cluster config_version="9" name="cname">
+  <fence_daemon/>
+  <clusternodes>
+    <clusternode name="rh7-1" nodeid="1">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-1"/>
+        </method>
+      </fence>
+    </clusternode>
+    <clusternode name="rh7-2" nodeid="2">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-2"/>
+        </method>
+      </fence>
+    </clusternode>
+  </clusternodes>
+  <cman broadcast="no" expected_votes="1" transport="rdma" two_node="1"/>
+  <fencedevices>
+    <fencedevice agent="fence_pcmk" name="pcmk-redirect"/>
+  </fencedevices>
+  <rm>
+    <failoverdomains/>
+    <resources/>
+  </rm>
+</cluster>
+""")
diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
index 2c9f963..364b40d 100644
--- a/pcs/test/test_constraints.py
+++ b/pcs/test/test_constraints.py
@@ -1,24 +1,27 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import os
-import sys
 import shutil
 import unittest
-parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0,parentdir)
 
-import utils
-from pcs_test_functions import pcs, ac, isMinimumPacemakerVersion
+from pcs.test.tools.assertions import AssertPcsMixin, console_report
+from pcs.test.tools.misc import (
+    ac,
+    get_test_resource as rc,
+    is_minimum_pacemaker_version,
+)
+from pcs.test.tools.pcs_runner import pcs, PcsRunner
 
 
-empty_cib = "empty.xml"
-empty_cib_1_2 = "empty-1.2.xml"
-temp_cib = "temp.xml"
-large_cib = "large.xml"
-temp_large_cib = "temp-large.xml"
+empty_cib = rc("cib-empty.xml")
+empty_cib_1_2 = rc("cib-empty-1.2.xml")
+temp_cib = rc("temp-cib.xml")
+large_cib = rc("cib-large.xml")
 
 class ConstraintTest(unittest.TestCase):
     def setUp(self):
@@ -116,17 +119,17 @@ Location Constraints:
       Rule: score=-INFINITY  (id:location-D2-rule)
         Expression: #uname eq c00n04  (id:location-D2-rule-expr)
   Resource: D3
-    Constraint: location-D3-2
-      Rule: score=-INFINITY boolean-op=and  (id:location-D3-2-rule)
-        Expression: not_defined pingd  (id:location-D3-2-rule-expr)
-        Expression: pingd lte 0  (id:location-D3-2-rule-expr-1)
-    Constraint: location-D3-1
-      Rule: score=-INFINITY boolean-op=or  (id:location-D3-1-rule)
-        Expression: not_defined pingd  (id:location-D3-1-rule-expr)
-        Expression: pingd lte 0  (id:location-D3-1-rule-expr-1)
     Constraint: location-D3
       Rule: score-attribute=pingd  (id:location-D3-rule)
         Expression: defined pingd  (id:location-D3-rule-expr)
+    Constraint: location-D3-1
+      Rule: boolean-op=or score=-INFINITY  (id:location-D3-1-rule)
+        Expression: not_defined pingd  (id:location-D3-1-rule-expr)
+        Expression: pingd lte 0  (id:location-D3-1-rule-expr-1)
+    Constraint: location-D3-2
+      Rule: boolean-op=and score=-INFINITY  (id:location-D3-2-rule)
+        Expression: not_defined pingd  (id:location-D3-2-rule-expr)
+        Expression: pingd lte 0  (id:location-D3-2-rule-expr-1)
   Resource: D4
     Constraint: location-D4
       Rule: score=INFINITY  (id:location-D4-rule)
@@ -142,6 +145,7 @@ Location Constraints:
           Date Spec: years=2005  (id:location-D6-rule-expr-datespec)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
 
         o,r = pcs("constraint remove location-C1-group")
@@ -165,17 +169,17 @@ Location Constraints:
       Rule: score=-INFINITY  (id:location-D2-rule)
         Expression: #uname eq c00n04  (id:location-D2-rule-expr)
   Resource: D3
-    Constraint: location-D3-2
-      Rule: score=-INFINITY boolean-op=and  (id:location-D3-2-rule)
-        Expression: not_defined pingd  (id:location-D3-2-rule-expr)
-        Expression: pingd lte 0  (id:location-D3-2-rule-expr-1)
-    Constraint: location-D3-1
-      Rule: score=-INFINITY boolean-op=or  (id:location-D3-1-rule)
-        Expression: not_defined pingd  (id:location-D3-1-rule-expr)
-        Expression: pingd lte 0  (id:location-D3-1-rule-expr-1)
     Constraint: location-D3
       Rule: score-attribute=pingd  (id:location-D3-rule)
         Expression: defined pingd  (id:location-D3-rule-expr)
+    Constraint: location-D3-1
+      Rule: boolean-op=or score=-INFINITY  (id:location-D3-1-rule)
+        Expression: not_defined pingd  (id:location-D3-1-rule-expr)
+        Expression: pingd lte 0  (id:location-D3-1-rule-expr-1)
+    Constraint: location-D3-2
+      Rule: boolean-op=and score=-INFINITY  (id:location-D3-2-rule)
+        Expression: not_defined pingd  (id:location-D3-2-rule-expr)
+        Expression: pingd lte 0  (id:location-D3-2-rule-expr-1)
   Resource: D5
     Constraint: location-D5
       Rule: score=INFINITY  (id:location-D5-rule)
@@ -187,6 +191,7 @@ Location Constraints:
           Date Spec: years=2005  (id:location-D6-rule-expr-datespec)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
 
     def testAdvancedConstraintRule(self):
@@ -200,16 +205,17 @@ Colocation Constraints:
 Location Constraints:
   Resource: D1
     Constraint: location-D1
-      Rule: score=INFINITY boolean-op=or  (id:location-D1-rule)
+      Rule: boolean-op=or score=INFINITY  (id:location-D1-rule)
         Expression: not_defined pingd  (id:location-D1-rule-expr)
         Expression: pingd lte 0  (id:location-D1-rule-expr-1)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
 
     def testEmptyConstraints(self):
         output, returnVal = pcs(temp_cib, "constraint")
-        assert returnVal == 0 and output == "Location Constraints:\nOrdering Constraints:\nColocation Constraints:\n", output
+        assert returnVal == 0 and output == "Location Constraints:\nOrdering Constraints:\nColocation Constraints:\nTicket Constraints:\n", output
 
     def testMultipleOrderConstraints(self):
         o,r = pcs("constraint order stop D1 then stop D2")
@@ -221,11 +227,11 @@ Colocation Constraints:
         assert r == 0
 
         o,r = pcs("constraint --full")
-        ac(o,"Location Constraints:\nOrdering Constraints:\n  stop D1 then stop D2 (kind:Mandatory) (id:order-D1-D2-mandatory)\n  start D1 then start D2 (kind:Mandatory) (id:order-D1-D2-mandatory-1)\nColocation Constraints:\n")
+        ac(o,"Location Constraints:\nOrdering Constraints:\n  stop D1 then stop D2 (kind:Mandatory) (id:order-D1-D2-mandatory)\n  start D1 then start D2 (kind:Mandatory) (id:order-D1-D2-mandatory-1)\nColocation Constraints:\nTicket Constraints:\n")
         assert r == 0
 
     def testOrderConstraintRequireAll(self):
-        if not isMinimumPacemakerVersion(1,1,12):
+        if not is_minimum_pacemaker_version(1, 1, 12):
             print("WARNING: Pacemaker version is too old (must be >= 1.1.12) to test require-all")
             return
 
@@ -243,6 +249,7 @@ Location Constraints:
 Ordering Constraints:
   start D1 then start D2 (kind:Mandatory) (Options: require-all=false) (id:order-D1-D2-mandatory)
 Colocation Constraints:
+Ticket Constraints:
 """)
         assert r == 0
 
@@ -258,28 +265,28 @@ Colocation Constraints:
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         assert returnVal == 0
-        ac (output,"Location Constraints:\n  Resource: D5\n    Enabled on: node1 (score:INFINITY) (id:location-D5-node1-INFINITY)\nOrdering Constraints:\n  start Master then start D5 (kind:Mandatory) (id:order-Master-D5-mandatory)\nColocation Constraints:\n  Master with D5 (score:INFINITY) (id:colocation-Master-D5-INFINITY)\n")
+        ac (output,"Location Constraints:\n  Resource: D5\n    Enabled on: node1 (score:INFINITY) (id:location-D5-node1-INFINITY)\nOrdering Constraints:\n  start Master then start D5 (kind:Mandatory) (id:order-Master-D5-mandatory)\nColocation Constraints:\n  Master with D5 (score:INFINITY) (id:colocation-Master-D5-INFINITY)\nTicket Constraints:\n")
 
         output, returnVal = pcs(temp_cib, "constraint show --full")
         assert returnVal == 0
-        ac(output,"Location Constraints:\n  Resource: D5\n    Enabled on: node1 (score:INFINITY) (id:location-D5-node1-INFINITY)\nOrdering Constraints:\n  start Master then start D5 (kind:Mandatory) (id:order-Master-D5-mandatory)\nColocation Constraints:\n  Master with D5 (score:INFINITY) (id:colocation-Master-D5-INFINITY)\n")
+        ac(output,"Location Constraints:\n  Resource: D5\n    Enabled on: node1 (score:INFINITY) (id:location-D5-node1-INFINITY)\nOrdering Constraints:\n  start Master then start D5 (kind:Mandatory) (id:order-Master-D5-mandatory)\nColocation Constraints:\n  Master with D5 (score:INFINITY) (id:colocation-Master-D5-INFINITY)\nTicket Constraints:\n")
 
     def testLocationConstraints(self):
         output, returnVal = pcs(temp_cib, "constraint location D5 prefers node1")
         assert returnVal == 0 and output == "", output
-        
+
         output, returnVal = pcs(temp_cib, "constraint location D5 avoids node1")
         assert returnVal == 0 and output == "", output
-        
+
         output, returnVal = pcs(temp_cib, "constraint location D5 prefers node1")
         assert returnVal == 0 and output == "", output
-        
+
         output, returnVal = pcs(temp_cib, "constraint location D5 avoids node2")
         assert returnVal == 0 and output == "", output
 
         output, returnVal = pcs(temp_cib, "constraint")
         assert returnVal == 0
-        ac(output, "Location Constraints:\n  Resource: D5\n    Enabled on: node1 (score:INFINITY)\n    Disabled on: node2 (score:-INFINITY)\nOrdering Constraints:\nColocation Constraints:\n")
+        ac(output, "Location Constraints:\n  Resource: D5\n    Enabled on: node1 (score:INFINITY)\n    Disabled on: node2 (score:-INFINITY)\nOrdering Constraints:\nColocation Constraints:\nTicket Constraints:\n")
 
         output, returnVal = pcs(temp_cib, "constraint location add location-D5-node1-INFINITY ")
         assert returnVal == 1
@@ -291,7 +298,7 @@ Colocation Constraints:
 
         output, returnVal = pcs(temp_cib, "constraint location D6 prefers node1")
         assert returnVal == 0 and output == "", output
-        
+
         output, returnVal = pcs(temp_cib, "constraint remove blahblah")
         assert returnVal == 1 and output.startswith("Error: Unable to find constraint - 'blahblah'"), output
 
@@ -377,8 +384,8 @@ Colocation Constraints:
 
         o, r = pcs(temp_cib, "constraint")
         assert r == 0
-        ac(o,'Location Constraints:\nOrdering Constraints:\nColocation Constraints:\n  D1 with D3-clone (score:INFINITY)\n  D1 with D2 (score:100)\n  D1 with D2 (score:-100)\n  Master with D5 (score:100)\n  M1-master with M2-master (score:INFINITY) (rsc-role:Master) (with-rsc-role:Master)\n  M3-master with M4-master (score:INFINITY)\n  M5-master with M6-master (score:500) (rsc-role:Slave) (with-rsc-role:Started)\n  M7-master with M8-master (score:INFINITY) (rsc-role:Started) (with-rsc-ro [...]
-        
+        ac(o,'Location Constraints:\nOrdering Constraints:\nColocation Constraints:\n  D1 with D3-clone (score:INFINITY)\n  D1 with D2 (score:100)\n  D1 with D2 (score:-100)\n  Master with D5 (score:100)\n  M1-master with M2-master (score:INFINITY) (rsc-role:Master) (with-rsc-role:Master)\n  M3-master with M4-master (score:INFINITY)\n  M5-master with M6-master (score:500) (rsc-role:Slave) (with-rsc-role:Started)\n  M7-master with M8-master (score:INFINITY) (rsc-role:Started) (with-rsc-ro [...]
+
     def testColocationSets(self):
         line = "resource create D7 Dummy"
         output, returnVal = pcs(temp_cib, line)
@@ -424,7 +431,7 @@ Colocation Constraints:
         ac(o, """\
 Colocation Constraints:
   Resource Sets:
-    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6_D7_set_D8_D9)
+    set D5 D6 D7 require-all=true sequential=false (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start require-all=false role=Stopped sequential=true (id:pcs_rsc_set_D8_D9) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6_D7_set_D8_D9)
     set D5 D6 (id:pcs_rsc_set_D5_D6) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6)
     set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6_set_D7_D8_set_D8_D9)
 """)
@@ -438,7 +445,7 @@ Colocation Constraints:
         ac(o, """\
 Colocation Constraints:
   Resource Sets:
-    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6_D7_set_D8_D9)
+    set D5 D6 D7 require-all=true sequential=false (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start require-all=false role=Stopped sequential=true (id:pcs_rsc_set_D8_D9) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6_D7_set_D8_D9)
     set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6_set_D7_D8_set_D8_D9)
 """)
         assert r == 0
@@ -446,41 +453,41 @@ Colocation Constraints:
         o, r = pcs(temp_cib, "resource delete D5")
         ac(o,"Removing D5 from set pcs_rsc_set_D5_D6_D7\nRemoving D5 from set pcs_rsc_set_D5_D6-1\nDeleting Resource - D5\n")
         assert r == 0
-        
+
         o, r = pcs(temp_cib, "resource delete D6")
         ac(o,"Removing D6 from set pcs_rsc_set_D5_D6_D7\nRemoving D6 from set pcs_rsc_set_D5_D6-1\nRemoving set pcs_rsc_set_D5_D6-1\nDeleting Resource - D6\n")
         assert r == 0
-        
+
         o, r = pcs(temp_cib, "constraint ref D7")
         ac(o,"Resource: D7\n  pcs_rsc_colocation_set_D5_D6_D7_set_D8_D9\n  pcs_rsc_colocation_set_D5_D6_set_D7_D8_set_D8_D9\n")
         assert r == 0
-        
+
         o, r = pcs(temp_cib, "constraint ref D8")
         ac(o,"Resource: D8\n  pcs_rsc_colocation_set_D5_D6_D7_set_D8_D9\n  pcs_rsc_colocation_set_D5_D6_set_D7_D8_set_D8_D9\n")
         assert r == 0
-        
+
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 sequential=foo")
-        ac(output, "Error: invalid value 'foo' of option 'sequential', allowed values are: true, false\n")
+        ac(output, "Error: 'foo' is not a valid sequential value, use true, false\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 require-all=foo")
-        ac(output, "Error: invalid value 'foo' of option 'require-all', allowed values are: true, false\n")
+        ac(output, "Error: 'foo' is not a valid require-all value, use true, false\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 role=foo")
-        ac(output, "Error: invalid value 'foo' of option 'role', allowed values are: Stopped, Started, Master, Slave\n")
+        ac(output, "Error: 'foo' is not a valid role value, use Stopped, Started, Master, Slave\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 action=foo")
-        ac(output, "Error: invalid value 'foo' of option 'action', allowed values are: start, promote, demote, stop\n")
+        ac(output, "Error: 'foo' is not a valid action value, use start, promote, demote, stop\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 foo=bar")
-        ac(output, "Error: invalid option 'foo', allowed options are: action, role, sequential, require-all\n")
+        ac(output, "Error: invalid option 'foo', allowed options are: action, require-all, role, sequential\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 setoptions foo=bar")
-        ac(output, "Error: invalid option 'foo', allowed options are: score, score-attribute, score-attribute-mangle, id\n")
+        ac(output, "Error: invalid option 'foo', allowed options are: id, score, score-attribute, score-attribute-mangle\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 setoptions score=foo")
@@ -496,7 +503,7 @@ Colocation Constraints:
         self.assertEqual(0, retValue)
 
     def testConstraintResourceDiscovery(self):
-        if not isMinimumPacemakerVersion(1,1,12):
+        if not is_minimum_pacemaker_version(1, 1, 12):
             print("WARNING: Pacemaker version is too old (must be >= 1.1.12) to test resource-discovery")
             return
 
@@ -517,7 +524,21 @@ Colocation Constraints:
         assert r == 0
 
         o,r = pcs("constraint --full")
-        ac(o,"Location Constraints:\n  Resource: crd\n    Constraint: location-crd (resource-discovery=exclusive)\n      Rule: score=-INFINITY boolean-op=and  (id:location-crd-rule)\n        Expression: opsrole ne controller0  (id:location-crd-rule-expr)\n        Expression: opsrole ne controller1  (id:location-crd-rule-expr-1)\n  Resource: crd1\n    Constraint: location-crd1 (resource-discovery=exclusive)\n      Rule: score=-INFINITY  (id:location-crd1-rule)\n        Expression: opsrole [...]
+        ac(o, '\n'.join([
+            'Location Constraints:',
+            '  Resource: crd',
+            '    Constraint: location-crd (resource-discovery=exclusive)',
+            '      Rule: boolean-op=and score=-INFINITY  (id:location-crd-rule)',
+            '        Expression: opsrole ne controller0  (id:location-crd-rule-expr)',
+            '        Expression: opsrole ne controller1  (id:location-crd-rule-expr-1)',
+            '  Resource: crd1',
+            '    Constraint: location-crd1 (resource-discovery=exclusive)',
+            '      Rule: score=-INFINITY  (id:location-crd1-rule)',
+            '        Expression: opsrole2 ne controller2  (id:location-crd1-rule-expr)',
+            'Ordering Constraints:',
+            'Colocation Constraints:',
+            'Ticket Constraints:',
+        ])+'\n')
         assert r == 0
 
         o,r = pcs("constraint delete location-crd")
@@ -529,7 +550,7 @@ Colocation Constraints:
         assert r==0
 
         o,r = pcs("constraint --full")
-        ac(o,"Location Constraints:\nOrdering Constraints:\nColocation Constraints:\n")
+        ac(o,"Location Constraints:\nOrdering Constraints:\nColocation Constraints:\nTicket Constraints:\n")
         assert r == 0
 
         o,r = pcs("constraint location add my_constraint_id crd my_node -INFINITY resource-discovery=always")
@@ -541,7 +562,7 @@ Colocation Constraints:
         assert r == 0
 
         o,r = pcs("constraint --full")
-        ac(o,"Location Constraints:\n  Resource: crd\n    Disabled on: my_node (score:-INFINITY) (resource-discovery=always) (id:my_constraint_id)\n  Resource: crd1\n    Disabled on: my_node (score:-INFINITY) (resource-discovery=never) (id:my_constraint_id2)\nOrdering Constraints:\nColocation Constraints:\n")
+        ac(o,"Location Constraints:\n  Resource: crd\n    Disabled on: my_node (score:-INFINITY) (resource-discovery=always) (id:my_constraint_id)\n  Resource: crd1\n    Disabled on: my_node (score:-INFINITY) (resource-discovery=never) (id:my_constraint_id2)\nOrdering Constraints:\nColocation Constraints:\nTicket Constraints:\n")
         assert r == 0
 
         o,r = pcs("constraint location add my_constraint_id3 crd1 my_node2 -INFINITY bad-opt=test")
@@ -665,7 +686,7 @@ Colocation Constraints:
         ac(o,"""\
 Ordering Constraints:
   Resource Sets:
-    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_set_D5_D6_D7_set_D8_D9)
+    set D5 D6 D7 require-all=true sequential=false (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start require-all=false role=Stopped sequential=true (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_set_D5_D6_D7_set_D8_D9)
     set D5 D6 (id:pcs_rsc_set_D5_D6) (id:pcs_rsc_order_set_D5_D6)
     set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) (id:pcs_rsc_order_set_D5_D6_set_D7_D8_set_D8_D9)
 """)
@@ -679,36 +700,36 @@ Ordering Constraints:
         ac(o,"""\
 Ordering Constraints:
   Resource Sets:
-    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_set_D5_D6_D7_set_D8_D9)
+    set D5 D6 D7 require-all=true sequential=false (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start require-all=false role=Stopped sequential=true (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_set_D5_D6_D7_set_D8_D9)
     set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) (id:pcs_rsc_order_set_D5_D6_set_D7_D8_set_D8_D9)
 """)
-        
+
         o, r = pcs(temp_cib, "resource delete D5")
         ac(o,"Removing D5 from set pcs_rsc_set_D5_D6_D7\nRemoving D5 from set pcs_rsc_set_D5_D6-1\nDeleting Resource - D5\n")
         assert r == 0
-        
+
         o, r = pcs(temp_cib, "resource delete D6")
         ac(o,"Removing D6 from set pcs_rsc_set_D5_D6_D7\nRemoving D6 from set pcs_rsc_set_D5_D6-1\nRemoving set pcs_rsc_set_D5_D6-1\nDeleting Resource - D6\n")
         assert r == 0
 
         output, retValue = pcs(temp_cib, "constraint order set D1 D2 sequential=foo")
-        ac(output, "Error: invalid value 'foo' of option 'sequential', allowed values are: true, false\n")
+        ac(output, "Error: 'foo' is not a valid sequential value, use true, false\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint order set D1 D2 require-all=foo")
-        ac(output, "Error: invalid value 'foo' of option 'require-all', allowed values are: true, false\n")
+        ac(output, "Error: 'foo' is not a valid require-all value, use true, false\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint order set D1 D2 role=foo")
-        ac(output, "Error: invalid value 'foo' of option 'role', allowed values are: Stopped, Started, Master, Slave\n")
+        ac(output, "Error: 'foo' is not a valid role value, use Stopped, Started, Master, Slave\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint order set D1 D2 action=foo")
-        ac(output, "Error: invalid value 'foo' of option 'action', allowed values are: start, promote, demote, stop\n")
+        ac(output, "Error: 'foo' is not a valid action value, use start, promote, demote, stop\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint order set D1 D2 foo=bar")
-        ac(output, "Error: invalid option 'foo', allowed options are: action, role, sequential, require-all\n")
+        ac(output, "Error: invalid option 'foo', allowed options are: action, require-all, role, sequential\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(
@@ -716,7 +737,7 @@ Ordering Constraints:
             "constraint order set D1 D2 setoptions foo=bar"
         )
         ac(output, """\
-Error: invalid option 'foo', allowed options are: kind, symmetrical, id
+Error: invalid option 'foo', allowed options are: id, kind, symmetrical
 """)
         self.assertEqual(1, retValue)
 
@@ -724,18 +745,14 @@ Error: invalid option 'foo', allowed options are: kind, symmetrical, id
             temp_cib,
             "constraint order set D1 D2 setoptions kind=foo"
         )
-        ac(output, """\
-Error: invalid kind value 'foo', allowed values are: Optional, Mandatory, Serialize
-""")
+        ac(output, "Error: 'foo' is not a valid kind value, use Optional, Mandatory, Serialize\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(
             temp_cib,
             "constraint order set D1 D2 setoptions symmetrical=foo"
         )
-        ac(output, """\
-Error: invalid symmetrical value 'foo', allowed values are: true, false
-""")
+        ac(output, "Error: 'foo' is not a valid symmetrical value, use true, false\n")
         self.assertEqual(1, retValue)
 
         output, retValue = pcs(
@@ -750,10 +767,11 @@ Error: invalid symmetrical value 'foo', allowed values are: true, false
 Location Constraints:
 Ordering Constraints:
   Resource Sets:
-    set D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_set_D5_D6_D7_set_D8_D9)
+    set D7 require-all=true sequential=false (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start require-all=false role=Stopped sequential=true (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_set_D5_D6_D7_set_D8_D9)
     set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) (id:pcs_rsc_order_set_D5_D6_set_D7_D8_set_D8_D9)
-    set D1 D2 (id:pcs_rsc_set_D1_D2) setoptions symmetrical=false kind=Mandatory (id:pcs_rsc_order_set_D1_D2)
+    set D1 D2 (id:pcs_rsc_set_D1_D2) setoptions kind=Mandatory symmetrical=false (id:pcs_rsc_order_set_D1_D2)
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, retValue)
 
@@ -795,12 +813,13 @@ Location Constraints:
           Date Spec: hours=9-16 weekdays=1-5  (id:location-D2-rh7-2-INFINITY-rule-expr-datespec)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
-        
+
         o, r = pcs(temp_cib, "constraint rule remove location-D1-rh7-1-INFINITY-rule-1")
         ac(o,"Removing Rule: location-D1-rh7-1-INFINITY-rule-1\n")
         assert r == 0
-        
+
         o, r = pcs(temp_cib, "constraint rule remove location-D1-rh7-1-INFINITY-rule-2")
         assert r == 0 and o == "Removing Rule: location-D1-rh7-1-INFINITY-rule-2\n", o
 
@@ -819,6 +838,7 @@ Location Constraints:
           Date Spec: hours=9-16 weekdays=1-5  (id:location-D2-rh7-2-INFINITY-rule-expr-datespec)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
 
         o, r = pcs(temp_cib, "constraint rule remove location-D1-rh7-1-INFINITY-rule")
@@ -835,6 +855,7 @@ Location Constraints:
           Date Spec: hours=9-16 weekdays=1-5  (id:location-D2-rh7-2-INFINITY-rule-expr-datespec)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
 
         o,r = pcs("constraint location D1 rule role=master")
@@ -867,10 +888,11 @@ Colocation Constraints:
 Location Constraints:
   Resource: stateful0
     Constraint: location-stateful0
-      Rule: score=INFINITY role=master  (id:location-stateful0-rule)
+      Rule: role=master score=INFINITY  (id:location-stateful0-rule)
         Expression: #uname eq rh7-1  (id:location-stateful0-rule-expr)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         assert r == 0
 
@@ -956,11 +978,11 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert r == 1
 
         o,r = pcs("constraint order set stateful1 dummy1")
-        ac(o,"Error: stateful1 is a master/slave resource, you should use the master id: stateful1-master when adding constraints. Use --force to override.\n")
+        ac(o,"Error: stateful1 is a master/slave resource, you should use the master id: stateful1-master when adding constraints, use --force to override\n")
         assert r == 1
 
         o,r = pcs("constraint order set dummy1 statefulG")
-        ac(o,"Error: statefulG is a master/slave resource, you should use the master id: statefulG-master when adding constraints. Use --force to override.\n")
+        ac(o,"Error: statefulG is a master/slave resource, you should use the master id: statefulG-master when adding constraints, use --force to override\n")
         assert r == 1
 
         o,r = pcs("constraint colocation add stateful1 with dummy1")
@@ -972,15 +994,15 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert r == 1
 
         o,r = pcs("constraint colocation set dummy1 stateful1")
-        ac(o,"Error: stateful1 is a master/slave resource, you should use the master id: stateful1-master when adding constraints. Use --force to override.\n")
+        ac(o,"Error: stateful1 is a master/slave resource, you should use the master id: stateful1-master when adding constraints, use --force to override\n")
         assert r == 1
 
         o,r = pcs("constraint colocation set statefulG dummy1")
-        ac(o,"Error: statefulG is a master/slave resource, you should use the master id: statefulG-master when adding constraints. Use --force to override.\n")
+        ac(o,"Error: statefulG is a master/slave resource, you should use the master id: statefulG-master when adding constraints, use --force to override\n")
         assert r == 1
 
         o,r = pcs("constraint --full")
-        ac(o,"Location Constraints:\nOrdering Constraints:\nColocation Constraints:\n")
+        ac(o,"Location Constraints:\nOrdering Constraints:\nColocation Constraints:\nTicket Constraints:\n")
         assert r == 0
 
         o,r = pcs("constraint location stateful1 prefers rh7-1 --force")
@@ -996,7 +1018,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert r == 0
 
         o,r = pcs("constraint order set stateful1 dummy1 --force")
-        ac(o,"")
+        ac(o,"Warning: stateful1 is a master/slave resource, you should use the master id: stateful1-master when adding constraints\n")
         assert r == 0
 
         o,r = pcs("constraint colocation add stateful1 with dummy1 --force")
@@ -1004,7 +1026,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert r == 0
 
         o,r = pcs("constraint colocation set stateful1 dummy1 --force")
-        ac(o,"")
+        ac(o,"Warning: stateful1 is a master/slave resource, you should use the master id: stateful1-master when adding constraints\n")
         assert r == 0
 
         o,r = pcs("constraint --full")
@@ -1024,6 +1046,7 @@ Colocation Constraints:
   stateful1 with dummy1 (score:INFINITY) (id:colocation-stateful1-dummy1-INFINITY)
   Resource Sets:
     set stateful1 dummy1 (id:pcs_rsc_set_stateful1_dummy1-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_stateful1_dummy1)
+Ticket Constraints:
 """)
         assert r == 0
 
@@ -1153,6 +1176,7 @@ Colocation Constraints:
   Resource Sets:
     set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_stateful1)
     set statefulG-master dummy1 (id:pcs_rsc_set_statefulG-master_dummy1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_statefulG_dummy1)
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -1202,7 +1226,10 @@ Error: duplicate constraint already exists, use --force to override
         output, returnVal = pcs(
             "constraint order set stateful1 dummy1 --autocorrect --force"
         )
-        ac(output, "")
+        ac(output, console_report(
+            "Warning: duplicate constraint already exists",
+            "  set stateful1-master dummy1 (id:pcs_rsc_set_stateful1-master_dummy1) (id:pcs_rsc_order_set_stateful1_dummy1)",
+        ))
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
@@ -1232,7 +1259,10 @@ Error: duplicate constraint already exists, use --force to override
         output, returnVal = pcs(
             "constraint colocation set dummy1 stateful1 --autocorrect --force"
         )
-        ac(output, "")
+        ac(output, console_report(
+            "Warning: duplicate constraint already exists",
+            "  set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_stateful1)",
+        ))
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint --full")
@@ -1240,12 +1270,12 @@ Error: duplicate constraint already exists, use --force to override
 Location Constraints:
   Resource: stateful1-master
     Enabled on: rh7-1 (score:INFINITY) (id:location-stateful1-rh7-1-INFINITY)
-    Constraint: location-stateful1-master-1
-      Rule: score=INFINITY  (id:location-stateful1-master-1-rule)
-        Expression: #uname eq rh7-1  (id:location-stateful1-master-1-rule-expr)
     Constraint: location-stateful1-master
       Rule: score=INFINITY  (id:location-stateful1-master-rule)
         Expression: #uname eq rh7-1  (id:location-stateful1-master-rule-expr)
+    Constraint: location-stateful1-master-1
+      Rule: score=INFINITY  (id:location-stateful1-master-1-rule)
+        Expression: #uname eq rh7-1  (id:location-stateful1-master-1-rule-expr)
   Resource: statefulG-master
     Enabled on: rh7-1 (score:INFINITY) (id:location-statefulG-rh7-1-INFINITY)
     Constraint: location-statefulG-master
@@ -1267,6 +1297,7 @@ Colocation Constraints:
     set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_stateful1)
     set statefulG-master dummy1 (id:pcs_rsc_set_statefulG-master_dummy1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_statefulG_dummy1)
     set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_stateful1-1)
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -1314,11 +1345,11 @@ Colocation Constraints:
         assert r == 1
 
         o,r = pcs("constraint order set dummy1 dummy")
-        ac(o,"Error: dummy is a clone resource, you should use the clone id: dummy-clone when adding constraints. Use --force to override.\n")
+        ac(o,"Error: dummy is a clone resource, you should use the clone id: dummy-clone when adding constraints, use --force to override\n")
         assert r == 1
 
         o,r = pcs("constraint order set dummyG dummy1")
-        ac(o,"Error: dummyG is a clone resource, you should use the clone id: dummyG-clone when adding constraints. Use --force to override.\n")
+        ac(o,"Error: dummyG is a clone resource, you should use the clone id: dummyG-clone when adding constraints, use --force to override\n")
         assert r == 1
 
         o,r = pcs("constraint colocation add dummy with dummy1")
@@ -1330,15 +1361,15 @@ Colocation Constraints:
         assert r == 1
 
         o,r = pcs("constraint colocation set dummy1 dummy")
-        ac(o,"Error: dummy is a clone resource, you should use the clone id: dummy-clone when adding constraints. Use --force to override.\n")
+        ac(o,"Error: dummy is a clone resource, you should use the clone id: dummy-clone when adding constraints, use --force to override\n")
         assert r == 1
 
         o,r = pcs("constraint colocation set dummy1 dummyG")
-        ac(o,"Error: dummyG is a clone resource, you should use the clone id: dummyG-clone when adding constraints. Use --force to override.\n")
+        ac(o,"Error: dummyG is a clone resource, you should use the clone id: dummyG-clone when adding constraints, use --force to override\n")
         assert r == 1
 
         o,r = pcs("constraint --full")
-        ac(o,"Location Constraints:\nOrdering Constraints:\nColocation Constraints:\n")
+        ac(o,"Location Constraints:\nOrdering Constraints:\nColocation Constraints:\nTicket Constraints:\n")
         assert r == 0
 
         o,r = pcs("constraint location dummy prefers rh7-1 --force")
@@ -1354,7 +1385,7 @@ Colocation Constraints:
         assert r == 0
 
         o,r = pcs("constraint order set dummy1 dummy --force")
-        ac(o,"")
+        ac(o,"Warning: dummy is a clone resource, you should use the clone id: dummy-clone when adding constraints\n")
         assert r == 0
 
         o,r = pcs("constraint colocation add dummy with dummy1 --force")
@@ -1362,7 +1393,7 @@ Colocation Constraints:
         assert r == 0
 
         o,r = pcs("constraint colocation set dummy1 dummy --force")
-        ac(o,"")
+        ac(o,"Warning: dummy is a clone resource, you should use the clone id: dummy-clone when adding constraints\n")
         assert r == 0
 
         o,r = pcs("constraint --full")
@@ -1382,6 +1413,7 @@ Colocation Constraints:
   dummy with dummy1 (score:INFINITY) (id:colocation-dummy-dummy1-INFINITY)
   Resource Sets:
     set dummy1 dummy (id:pcs_rsc_set_dummy1_dummy-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummy)
+Ticket Constraints:
 """)
         assert r == 0
 
@@ -1503,6 +1535,7 @@ Colocation Constraints:
   Resource Sets:
     set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummy)
     set dummy1 dummyG-clone (id:pcs_rsc_set_dummy1_dummyG-clone) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummyG)
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -1552,7 +1585,10 @@ Error: duplicate constraint already exists, use --force to override
         output, returnVal = pcs(
             "constraint order set dummy1 dummy --autocorrect --force"
         )
-        ac(output, "")
+        ac(output, console_report(
+            "Warning: duplicate constraint already exists",
+            "  set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone) (id:pcs_rsc_order_set_dummy1_dummy)",
+        ))
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
@@ -1582,7 +1618,10 @@ Error: duplicate constraint already exists, use --force to override
         output, returnVal = pcs(
             "constraint colocation set dummy1 dummy --autocorrect --force"
         )
-        ac(output, "")
+        ac(output, console_report(
+            "Warning: duplicate constraint already exists",
+            "  set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummy)",
+        ))
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint --full")
@@ -1590,12 +1629,12 @@ Error: duplicate constraint already exists, use --force to override
 Location Constraints:
   Resource: dummy-clone
     Enabled on: rh7-1 (score:INFINITY) (id:location-dummy-rh7-1-INFINITY)
-    Constraint: location-dummy-clone-1
-      Rule: score=INFINITY  (id:location-dummy-clone-1-rule)
-        Expression: #uname eq rh7-1  (id:location-dummy-clone-1-rule-expr)
     Constraint: location-dummy-clone
       Rule: score=INFINITY  (id:location-dummy-clone-rule)
         Expression: #uname eq rh7-1  (id:location-dummy-clone-rule-expr)
+    Constraint: location-dummy-clone-1
+      Rule: score=INFINITY  (id:location-dummy-clone-1-rule)
+        Expression: #uname eq rh7-1  (id:location-dummy-clone-1-rule-expr)
   Resource: dummyG-clone
     Enabled on: rh7-1 (score:INFINITY) (id:location-dummyG-rh7-1-INFINITY)
     Constraint: location-dummyG-clone
@@ -1617,6 +1656,7 @@ Colocation Constraints:
     set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummy)
     set dummy1 dummyG-clone (id:pcs_rsc_set_dummy1_dummyG-clone) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummyG)
     set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-3) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummy-1)
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -1626,49 +1666,49 @@ Colocation Constraints:
         os.system("CIB_file="+temp_cib+" cibadmin -R --scope constraints --xml-text '<constraints><rsc_location id=\"cli-prefer-stateful0-master\" role=\"Master\" rsc=\"stateful0-master\" node=\"rh7-1\" score=\"INFINITY\"/><rsc_location id=\"cli-ban-stateful0-master-on-rh7-1\" rsc=\"stateful0-master\" role=\"Slave\" node=\"rh7-1\" score=\"-INFINITY\"/></constraints>'")
 
         o,r = pcs("constraint")
-        ac(o,"Location Constraints:\n  Resource: stateful0-master\n    Enabled on: rh7-1 (score:INFINITY) (role: Master)\n    Disabled on: rh7-1 (score:-INFINITY) (role: Slave)\nOrdering Constraints:\nColocation Constraints:\n")
+        ac(o,"Location Constraints:\n  Resource: stateful0-master\n    Enabled on: rh7-1 (score:INFINITY) (role: Master)\n    Disabled on: rh7-1 (score:-INFINITY) (role: Slave)\nOrdering Constraints:\nColocation Constraints:\nTicket Constraints:\n")
         assert r == 0
 
     def testManyConstraints(self):
-        shutil.copy(large_cib, temp_large_cib)
+        shutil.copy(large_cib, temp_cib)
 
-        output, returnVal = pcs(temp_large_cib, "constraint location dummy prefers rh7-1")
+        output, returnVal = pcs(temp_cib, "constraint location dummy prefers rh7-1")
         ac(output, "")
         assert returnVal == 0
 
-        output, returnVal = pcs(temp_large_cib, "constraint location show resources dummy --full")
+        output, returnVal = pcs(temp_cib, "constraint location show resources dummy --full")
         ac(output, "Location Constraints:\n  Resource: dummy\n    Enabled on: rh7-1 (score:INFINITY) (id:location-dummy-rh7-1-INFINITY)\n")
         assert returnVal == 0
 
-        output, returnVal = pcs(temp_large_cib, "constraint location remove location-dummy-rh7-1-INFINITY")
+        output, returnVal = pcs(temp_cib, "constraint location remove location-dummy-rh7-1-INFINITY")
         ac(output, "")
         assert returnVal == 0
 
-        output, returnVal = pcs(temp_large_cib, "constraint colocation add dummy1 with dummy2")
+        output, returnVal = pcs(temp_cib, "constraint colocation add dummy1 with dummy2")
         ac(output, "")
         assert returnVal == 0
 
-        output, returnVal = pcs(temp_large_cib, "constraint colocation remove dummy1 dummy2")
+        output, returnVal = pcs(temp_cib, "constraint colocation remove dummy1 dummy2")
         ac(output, "")
         assert returnVal == 0
 
-        output, returnVal = pcs(temp_large_cib, "constraint order dummy1 then dummy2")
+        output, returnVal = pcs(temp_cib, "constraint order dummy1 then dummy2")
         ac(output, "Adding dummy1 dummy2 (kind: Mandatory) (Options: first-action=start then-action=start)\n")
         assert returnVal == 0
 
-        output, returnVal = pcs(temp_large_cib, "constraint order remove dummy1")
+        output, returnVal = pcs(temp_cib, "constraint order remove dummy1")
         ac(output, "")
         assert returnVal == 0
 
-        output, returnVal = pcs(temp_large_cib, "constraint location dummy prefers rh7-1")
+        output, returnVal = pcs(temp_cib, "constraint location dummy prefers rh7-1")
         ac(output, "")
         assert returnVal == 0
 
-        output, returnVal = pcs(temp_large_cib, "constraint location show resources dummy --full")
+        output, returnVal = pcs(temp_cib, "constraint location show resources dummy --full")
         ac(output, "Location Constraints:\n  Resource: dummy\n    Enabled on: rh7-1 (score:INFINITY) (id:location-dummy-rh7-1-INFINITY)\n")
         assert returnVal == 0
 
-        output, returnVal = pcs(temp_large_cib, "constraint remove location-dummy-rh7-1-INFINITY")
+        output, returnVal = pcs(temp_cib, "constraint remove location-dummy-rh7-1-INFINITY")
         ac(output, "")
         assert returnVal == 0
 
@@ -1708,6 +1748,7 @@ Ordering Constraints:
   start D6 then start D1-clone (kind:Mandatory) (id:order-D6-D1-mandatory)
 Colocation Constraints:
   D1-clone with D5 (score:INFINITY) (id:colocation-D1-D5-INFINITY)
+Ticket Constraints:
 """)
         assert returnVal == 0
 
@@ -1747,6 +1788,7 @@ Ordering Constraints:
   start D6 then start D1-master (kind:Mandatory) (id:order-D6-D1-mandatory)
 Colocation Constraints:
   D1-master with D5 (score:INFINITY) (id:colocation-D1-D5-INFINITY)
+Ticket Constraints:
 """)
         assert returnVal == 0
 
@@ -1790,6 +1832,7 @@ Ordering Constraints:
   start D6 then start DG-clone (kind:Mandatory) (id:order-D6-DG-mandatory)
 Colocation Constraints:
   DG-clone with D5 (score:INFINITY) (id:colocation-DG-D5-INFINITY)
+Ticket Constraints:
 """)
         assert returnVal == 0
 
@@ -1833,6 +1876,7 @@ Ordering Constraints:
   start D6 then start DG-master (kind:Mandatory) (id:order-D6-DG-mandatory)
 Colocation Constraints:
   DG-master with D5 (score:INFINITY) (id:colocation-DG-D5-INFINITY)
+Ticket Constraints:
 """)
         assert returnVal == 0
 
@@ -1881,6 +1925,7 @@ Location Constraints:
     Disabled on: guest1 (score:-400) (id:location-D2-guest1--400)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -1901,6 +1946,7 @@ Location Constraints:
     Disabled on: node2 (score:-300) (id:location-D2-node2--300)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -1936,6 +1982,7 @@ Location Constraints:
     Disabled on: guest1 (score:-400) (id:location-D2-guest1--400)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -1954,6 +2001,7 @@ Location Constraints:
     Disabled on: node2 (score:-300) (id:location-D2-node2--300)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -2073,6 +2121,7 @@ Ordering Constraints:
   stop D5 then stop D6 (kind:Mandatory) (id:order-D5-D6-mandatory)
   stop D5 then stop D6 (kind:Mandatory) (id:order-D5-D6-mandatory-1)
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -2155,6 +2204,7 @@ Colocation Constraints:
   D2 with D5 (score:INFINITY) (rsc-role:Started) (with-rsc-role:Started) (id:colocation-D2-D5-INFINITY)
   D2 with D5 (score:INFINITY) (rsc-role:Stopped) (with-rsc-role:Stopped) (id:colocation-D2-D5-INFINITY-1)
   D2 with D5 (score:INFINITY) (rsc-role:Stopped) (with-rsc-role:Stopped) (id:colocation-D2-D5-INFINITY-2)
+Ticket Constraints:
 """)
 
     def testDuplicateSetConstraints(self):
@@ -2170,7 +2220,10 @@ Error: duplicate constraint already exists, use --force to override
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs("constraint order set D1 D2 --force")
-        ac(output, "")
+        ac(output, console_report(
+            "Warning: duplicate constraint already exists",
+            "  set D1 D2 (id:pcs_rsc_set_D1_D2) (id:pcs_rsc_order_set_D1_D2)",
+        ))
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint order set D1 D2 set D5 D6")
@@ -2185,7 +2238,10 @@ Error: duplicate constraint already exists, use --force to override
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs("constraint order set D1 D2 set D5 D6 --force")
-        ac(output, "")
+        ac(output, console_report(
+            "Warning: duplicate constraint already exists",
+            "  set D1 D2 (id:pcs_rsc_set_D1_D2-2) set D5 D6 (id:pcs_rsc_set_D5_D6) (id:pcs_rsc_order_set_D1_D2_set_D5_D6)",
+        ))
         self.assertEqual(0, returnVal)
 
 
@@ -2201,7 +2257,10 @@ Error: duplicate constraint already exists, use --force to override
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs("constraint colocation set D1 D2 --force")
-        ac(output, "")
+        ac(output, console_report(
+            "Warning: duplicate constraint already exists",
+            "  set D1 D2 (id:pcs_rsc_set_D1_D2-4) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D1_D2)"
+        ))
         self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint colocation set D1 D2 set D5 D6")
@@ -2218,7 +2277,10 @@ Error: duplicate constraint already exists, use --force to override
         output, returnVal = pcs(
             "constraint colocation set D1 D2 set D5 D6 --force"
         )
-        ac(output, "")
+        ac(output, console_report(
+            "Warning: duplicate constraint already exists",
+            "  set D1 D2 (id:pcs_rsc_set_D1_D2-6) set D5 D6 (id:pcs_rsc_set_D5_D6-2) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D1_D2_set_D5_D6)"
+        ))
         self.assertEqual(0, returnVal)
 
 
@@ -2248,6 +2310,7 @@ Colocation Constraints:
     set D1 D2 (id:pcs_rsc_set_D1_D2-6) set D5 D6 (id:pcs_rsc_set_D5_D6-2) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D1_D2_set_D5_D6)
     set D1 D2 (id:pcs_rsc_set_D1_D2-7) set D5 D6 (id:pcs_rsc_set_D5_D6-3) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D1_D2_set_D5_D6-1)
     set D6 D1 (id:pcs_rsc_set_D6_D1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D6_D1)
+Ticket Constraints:
 """)
 
     def testDuplicateLocationRules(self):
@@ -2286,7 +2349,7 @@ Error: duplicate constraint already exists, use --force to override
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
   Constraint: location-D2-1
-    Rule: score=INFINITY boolean-op=or  (id:location-D2-1-rule)
+    Rule: boolean-op=or score=INFINITY  (id:location-D2-1-rule)
       Expression: #uname eq node1  (id:location-D2-1-rule-expr)
       Expression: #uname eq node2  (id:location-D2-1-rule-expr-1)
 """)
@@ -2298,7 +2361,7 @@ Error: duplicate constraint already exists, use --force to override
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
   Constraint: location-D2-1
-    Rule: score=INFINITY boolean-op=or  (id:location-D2-1-rule)
+    Rule: boolean-op=or score=INFINITY  (id:location-D2-1-rule)
       Expression: #uname eq node1  (id:location-D2-1-rule-expr)
       Expression: #uname eq node2  (id:location-D2-1-rule-expr-1)
 """)
@@ -2314,26 +2377,27 @@ Error: duplicate constraint already exists, use --force to override
         ac(output, """\
 Location Constraints:
   Resource: D1
-    Constraint: location-D1-1
-      Rule: score=INFINITY  (id:location-D1-1-rule)
-        Expression: #uname eq node1  (id:location-D1-1-rule-expr)
     Constraint: location-D1
       Rule: score=INFINITY  (id:location-D1-rule)
         Expression: #uname eq node1  (id:location-D1-rule-expr)
+    Constraint: location-D1-1
+      Rule: score=INFINITY  (id:location-D1-1-rule)
+        Expression: #uname eq node1  (id:location-D1-1-rule-expr)
   Resource: D2
+    Constraint: location-D2
+      Rule: score=INFINITY  (id:location-D2-rule)
+        Expression: #uname eq node1  (id:location-D2-rule-expr)
     Constraint: location-D2-1
-      Rule: score=INFINITY boolean-op=or  (id:location-D2-1-rule)
+      Rule: boolean-op=or score=INFINITY  (id:location-D2-1-rule)
         Expression: #uname eq node1  (id:location-D2-1-rule-expr)
         Expression: #uname eq node2  (id:location-D2-1-rule-expr-1)
     Constraint: location-D2-2
-      Rule: score=INFINITY boolean-op=or  (id:location-D2-2-rule)
+      Rule: boolean-op=or score=INFINITY  (id:location-D2-2-rule)
         Expression: #uname eq node2  (id:location-D2-2-rule-expr)
         Expression: #uname eq node1  (id:location-D2-2-rule-expr-1)
-    Constraint: location-D2
-      Rule: score=INFINITY  (id:location-D2-rule)
-        Expression: #uname eq node1  (id:location-D2-rule-expr)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -2390,9 +2454,7 @@ Error: invalid constraint id '3id', '3' is not a valid first character for a con
             temp_cib,
             "constraint colocation set D1 D2 setoptions id=id3"
         )
-        ac(output, """\
-Error: id 'id3' is already in use, please specify another one
-""")
+        ac(output, "Error: 'id3' already exists\n")
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
@@ -2422,9 +2484,7 @@ Error: invalid constraint id '5id', '5' is not a valid first character for a con
             temp_cib,
             "constraint order set D1 D2 setoptions id=id5"
         )
-        ac(output, """\
-Error: id 'id5' is already in use, please specify another one
-""")
+        ac(output, "Error: 'id5' already exists\n")
         self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
@@ -2525,9 +2585,119 @@ Colocation Constraints:
   Resource Sets:
     set D1 D2 (id:pcs_rsc_set_D1_D2) setoptions score=INFINITY (id:id3)
     set D2 D1 (id:pcs_rsc_set_D2_D1) setoptions score=100 (id:id4)
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
-if __name__ == "__main__":
-    unittest.main()
+class ConstraintBaseTest(unittest.TestCase, AssertPcsMixin):
+    def setUp(self):
+        shutil.copy(empty_cib, temp_cib)
+        self.pcs_runner = PcsRunner(temp_cib)
+        self.assert_pcs_success('resource create A Dummy')
+        self.assert_pcs_success('resource create B Dummy')
+
+
+class CommonCreateWithSet(ConstraintBaseTest):
+    def test_refuse_when_resource_does_not_exist(self):
+        self.assert_pcs_fail(
+            'constraint ticket set A C setoptions ticket=T',
+            ["Error: Resource 'C' does not exist"]
+        )
+
+class TicketCreateWithSet(ConstraintBaseTest):
+    def test_create_ticket(self):
+        self.assert_pcs_success(
+            'constraint ticket set A B setoptions ticket=T loss-policy=fence'
+        )
+
+    def test_can_skip_loss_policy(self):
+        self.assert_pcs_success('constraint ticket set A B setoptions ticket=T')
+        self.assert_pcs_success('constraint ticket show', stdout_full=[
+            "Ticket Constraints:",
+            "  Resource Sets:",
+            "    set A B setoptions ticket=T",
+        ])
+
+    def test_refuse_bad_loss_policy(self):
+        self.assert_pcs_fail(
+            'constraint ticket set A B setoptions ticket=T loss-policy=none',
+            ["Error: 'none' is not a valid loss-policy value, use fence, stop, freeze, demote"]
+        )
+
+    def test_refuse_when_ticket_option_is_missing(self):
+        self.assert_pcs_fail(
+            'constraint ticket set A B setoptions loss-policy=fence',
+            ["Error: required option 'ticket' is missing"]
+        )
+
+    def test_refuse_when_option_is_invalid(self):
+        self.assert_pcs_fail(
+            'constraint ticket set A B setoptions loss-policy',
+            stdout_start=["Error: missing value of 'loss-policy' option"]
+        )
 
+class TicketAdd(ConstraintBaseTest):
+    def test_create_ticket(self):
+        self.assert_pcs_success(
+            'constraint ticket add T master A loss-policy=fence'
+        )
+        self.assert_pcs_success('constraint ticket show', stdout_full=[
+            "Ticket Constraints:",
+            "  Master A loss-policy=fence ticket=T",
+        ])
+
+    def test_refuse_noexistent_resource_id(self):
+        self.assert_pcs_fail(
+            'constraint ticket add T master AA loss-policy=fence',
+            ["Error: Resource 'AA' does not exist"]
+        )
+
+    def test_refuse_invalid_role(self):
+        self.assert_pcs_fail(
+            'constraint ticket add T bad-role A loss-policy=fence',
+            ["Error: 'bad-role' is not a valid rsc-role value, use Stopped, Started, Master, Slave"]
+        )
+
+    def test_refuse_duplicate_ticket(self):
+        self.assert_pcs_success(
+            'constraint ticket add T master A loss-policy=fence'
+        )
+        self.assert_pcs_fail(
+            'constraint ticket add T master A loss-policy=fence',
+            [
+                'Error: duplicate constraint already exists, use --force to override',
+                '  Master A loss-policy=fence ticket=T (id:ticket-T-A-Master)'
+            ]
+        )
+
+    def test_accept_duplicate_ticket_with_force(self):
+        self.assert_pcs_success(
+            'constraint ticket add T master A loss-policy=fence'
+        )
+        self.assert_pcs_success(
+            'constraint ticket add T master A loss-policy=fence --force', [
+                "Warning: duplicate constraint already exists",
+                "  Master A loss-policy=fence ticket=T (id:ticket-T-A-Master)"
+            ]
+        )
+        self.assert_pcs_success('constraint ticket show', stdout_full=[
+            "Ticket Constraints:",
+            "  Master A loss-policy=fence ticket=T",
+            "  Master A loss-policy=fence ticket=T",
+        ])
+
+class TicketShow(ConstraintBaseTest):
+    def test_show_set(self):
+        self.assert_pcs_success('constraint ticket set A B setoptions ticket=T')
+        self.assert_pcs_success(
+            'constraint ticket add T master A loss-policy=fence'
+        )
+        self.assert_pcs_success(
+            'constraint ticket show',
+            [
+                "Ticket Constraints:",
+                "  Master A loss-policy=fence ticket=T",
+                "  Resource Sets:",
+                "    set A B setoptions ticket=T",
+            ]
+        )
diff --git a/pcs/test/test_library_acl.py b/pcs/test/test_lib_cib_acl.py
similarity index 63%
rename from pcs/test/test_library_acl.py
rename to pcs/test/test_lib_cib_acl.py
index 63eea0a..7e1750e 100644
--- a/pcs/test/test_library_acl.py
+++ b/pcs/test/test_lib_cib_acl.py
@@ -1,27 +1,26 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
-
-import unittest
-import os.path
-import sys
-
-currentdir = os.path.dirname(os.path.abspath(__file__))
-sys.path.insert(0, os.path.dirname(currentdir))
-
-import library_acl as lib
-from errors import error_codes
-from errors import ReportItemSeverity as severities
-from library_test_tools import LibraryAssertionMixin
-from library_test_tools import get_cib_manipulation_creator
-
-
-class LibraryAclTest(unittest.TestCase, LibraryAssertionMixin):
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_xml_equal,
+)
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
+
+from pcs.lib.cib import acl as lib
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severities
+
+class LibraryAclTest(TestCase):
     def setUp(self):
-        self.create_cib = get_cib_manipulation_creator(
-            os.path.join(currentdir, "empty.xml")
-        )
+        self.create_cib = get_xml_manipulation_creator_from_file(rc("cib-empty.xml"))
         self.cib = self.create_cib()
 
     def fixture_add_role(self, role_id):
@@ -30,10 +29,16 @@ class LibraryAclTest(unittest.TestCase, LibraryAssertionMixin):
             '<acls><acl_role id="{0}"/></acls>'.format(role_id)
         )
 
+    def assert_cib_equal(self, expected_cib):
+        got_xml = str(self.cib)
+        expected_xml = str(expected_cib)
+        assert_xml_equal(expected_xml, got_xml)
+
+
 class CreateRoleTest(LibraryAclTest):
     def test_create_for_new_role_id(self):
         role_id = 'new-id'
-        lib.create_role(self.cib.dom, role_id)
+        lib.create_role(self.cib.tree, role_id)
 
         self.assert_cib_equal(
             self.create_cib().append_to_first_tag_name(
@@ -43,38 +48,26 @@ class CreateRoleTest(LibraryAclTest):
         )
 
     def test_refuse_invalid_id(self):
-        self.assert_raise_library_error(
-            lambda: lib.create_role(self.cib.dom, '#invalid'),
+        assert_raise_library_error(
+            lambda: lib.create_role(self.cib.tree, '#invalid'),
             (
                 severities.ERROR,
-                error_codes.ID_IS_NOT_VALID,
+                report_codes.INVALID_ID,
                 {'id': '#invalid'},
             ),
         )
 
-    def test_refuse_existing_role_id(self):
-        role_id = 'role1'
-        self.fixture_add_role(role_id)
-        self.assert_raise_library_error(
-            lambda: lib.create_role(self.cib.dom, role_id),
-            (
-                severities.ERROR,
-                error_codes.ACL_ROLE_ALREADY_EXISTS,
-                {'id': role_id},
-            ),
-        )
-
     def test_refuse_existing_non_role_id(self):
         self.cib.append_to_first_tag_name(
             'nodes',
             '<node id="node-id" uname="node-hostname"/>'
         )
 
-        self.assert_raise_library_error(
-            lambda: lib.create_role(self.cib.dom, 'node-id'),
+        assert_raise_library_error(
+            lambda: lib.create_role(self.cib.tree, 'node-id'),
             (
                 severities.ERROR,
-                error_codes.ID_ALREADY_EXISTS,
+                report_codes.ID_ALREADY_EXISTS,
                 {'id': 'node-id'},
             ),
         )
@@ -85,7 +78,7 @@ class AddPermissionsToRoleTest(LibraryAclTest):
         self.fixture_add_role(role_id)
 
         lib.add_permissions_to_role(
-            self.cib.dom, role_id, [('read', 'xpath', '/whatever')]
+            self.cib.tree, role_id, [('read', 'xpath', '/whatever')]
         )
 
         self.assert_cib_equal(
@@ -101,14 +94,17 @@ class AddPermissionsToRoleTest(LibraryAclTest):
 
     def test_refuse_add_for_nonexistent_role_id(self):
         role_id = 'role1'
-        self.assert_raise_library_error(
+        assert_raise_library_error(
             lambda: lib.add_permissions_to_role(
-                self.cib.dom, role_id, [('read', 'xpath', '/whatever')]
+                self.cib.tree, role_id, [('read', 'xpath', '/whatever')]
             ),
             (
                 severities.ERROR,
-                error_codes.ACL_ROLE_NOT_FOUND,
-                {'role_id': role_id},
+                report_codes.ID_NOT_FOUND,
+                {
+                    "id": role_id,
+                    "id_description": "role",
+                }
             ),
         )
 
@@ -116,19 +112,27 @@ class AddPermissionsToRoleTest(LibraryAclTest):
         role_id = 'role1'
         self.fixture_add_role(role_id)
 
-        self.assert_raise_library_error(
+        assert_raise_library_error(
             lambda: lib.add_permissions_to_role(
-                self.cib.dom, role_id, [('readX', 'xpathX', '/whatever')]
+                self.cib.tree, role_id, [('readX', 'xpathX', '/whatever')]
             ),
             (
                 severities.ERROR,
-                error_codes.BAD_ACL_PERMISSION,
-                {'permission': 'readX'},
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "permission",
+                    "option_value": "readX",
+                    "allowed_values": ["read", "write", "deny"],
+                }
             ),
             (
                 severities.ERROR,
-                error_codes.BAD_ACL_SCOPE_TYPE,
-                {'scope_type': 'xpathX'},
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "scope type",
+                    "option_value": "xpathX",
+                    "allowed_values": ["xpath", "id"],
+                }
             ),
         )
 
@@ -136,13 +140,13 @@ class AddPermissionsToRoleTest(LibraryAclTest):
         role_id = 'role1'
         self.fixture_add_role(role_id)
 
-        self.assert_raise_library_error(
+        assert_raise_library_error(
             lambda: lib.add_permissions_to_role(
-                self.cib.dom, role_id, [('read', 'id', 'non-existent')]
+                self.cib.tree, role_id, [('read', 'id', 'non-existent')]
             ),
             (
                 severities.ERROR,
-                error_codes.ID_NOT_FOUND,
+                report_codes.ID_NOT_FOUND,
                 {'id': 'non-existent'}
             ),
         )
@@ -150,7 +154,7 @@ class AddPermissionsToRoleTest(LibraryAclTest):
 class ProvideRoleTest(LibraryAclTest):
     def test_add_role_for_nonexisting_id(self):
         role_id = 'new-id'
-        lib.provide_role(self.cib.dom, role_id)
+        lib.provide_role(self.cib.tree, role_id)
 
         self.assert_cib_equal(
             self.create_cib().append_to_first_tag_name('configuration', '''
@@ -164,7 +168,7 @@ class ProvideRoleTest(LibraryAclTest):
         self.fixture_add_role('role1')
 
         role_id = 'role1'
-        lib.provide_role(self.cib.dom, role_id)
+        lib.provide_role(self.cib.tree, role_id)
 
         self.assert_cib_equal(
             self.create_cib().append_to_first_tag_name('configuration', '''
@@ -176,7 +180,7 @@ class ProvideRoleTest(LibraryAclTest):
 
 class RemovePermissionForReferenceTest(LibraryAclTest):
     def test_has_no_efect_when_id_not_referenced(self):
-        lib.remove_permissions_referencing(self.cib.dom, 'dummy')
+        lib.remove_permissions_referencing(self.cib.tree, 'dummy')
         self.assert_cib_equal(self.create_cib())
 
     def test_remove_all_references(self):
@@ -192,7 +196,7 @@ class RemovePermissionForReferenceTest(LibraryAclTest):
             </acls>
         ''')
 
-        lib.remove_permissions_referencing(self.cib.dom, 'dummy')
+        lib.remove_permissions_referencing(self.cib.tree, 'dummy')
 
         self.assert_cib_equal(
             self.create_cib().append_to_first_tag_name('configuration', '''
@@ -208,7 +212,3 @@ class RemovePermissionForReferenceTest(LibraryAclTest):
               </acls>
             ''')
         )
-
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/pcs/test/test_lib_cib_tools.py b/pcs/test/test_lib_cib_tools.py
new file mode 100644
index 0000000..405a270
--- /dev/null
+++ b/pcs/test/test_lib_cib_tools.py
@@ -0,0 +1,147 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severities
+
+from pcs.lib.cib import tools as lib
+
+class CibToolsTest(TestCase):
+    def setUp(self):
+        self.create_cib = get_xml_manipulation_creator_from_file(rc("cib-empty.xml"))
+        self.cib = self.create_cib()
+
+    def fixture_add_primitive_with_id(self, element_id):
+        self.cib.append_to_first_tag_name(
+            "resources",
+            '<primitive id="{0}" class="ocf" provider="heartbeat" type="Dummy"/>'
+                .format(element_id)
+        )
+
+class DoesIdExistTest(CibToolsTest):
+    def test_existing_id(self):
+        self.fixture_add_primitive_with_id("myId")
+        self.assertTrue(lib.does_id_exist(self.cib.tree, "myId"))
+
+    def test_nonexisting_id(self):
+        self.fixture_add_primitive_with_id("myId")
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "otherId"))
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "myid"))
+        self.assertFalse(lib.does_id_exist(self.cib.tree, " myId"))
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "myId "))
+        self.assertFalse(lib.does_id_exist(self.cib.tree, "my Id"))
+
+class FindUniqueIdTest(CibToolsTest):
+    def test_already_unique(self):
+        self.fixture_add_primitive_with_id("myId")
+        self.assertEqual("other", lib.find_unique_id(self.cib.tree, "other"))
+
+    def test_add_suffix(self):
+        self.fixture_add_primitive_with_id("myId")
+        self.assertEqual("myId-1", lib.find_unique_id(self.cib.tree, "myId"))
+
+        self.fixture_add_primitive_with_id("myId-1")
+        self.assertEqual("myId-2", lib.find_unique_id(self.cib.tree, "myId"))
+
+    def test_suffix_not_needed(self):
+        self.fixture_add_primitive_with_id("myId-1")
+        self.assertEqual("myId", lib.find_unique_id(self.cib.tree, "myId"))
+
+    def test_add_first_available_suffix(self):
+        self.fixture_add_primitive_with_id("myId")
+        self.fixture_add_primitive_with_id("myId-1")
+        self.fixture_add_primitive_with_id("myId-3")
+        self.assertEqual("myId-2", lib.find_unique_id(self.cib.tree, "myId"))
+
+class GetConfigurationTest(CibToolsTest):
+    def test_success_if_exists(self):
+        self.assertEqual(
+            "configuration",
+            lib.get_configuration(self.cib.tree).tag
+        )
+
+    def test_raise_if_missing(self):
+        for conf in self.cib.tree.findall(".//configuration"):
+            conf.getparent().remove(conf)
+        assert_raise_library_error(
+            lambda: lib.get_configuration(self.cib.tree),
+            (
+                severities.ERROR,
+                report_codes.CIB_CANNOT_FIND_MANDATORY_SECTION,
+                {
+                    "section": "configuration",
+                }
+            ),
+        )
+
+class GetConstraintsTest(CibToolsTest):
+    def test_success_if_exists(self):
+        self.assertEqual(
+            "constraints",
+            lib.get_constraints(self.cib.tree).tag
+        )
+
+    def test_raise_if_missing(self):
+        for section in self.cib.tree.findall(".//configuration/constraints"):
+            section.getparent().remove(section)
+        assert_raise_library_error(
+            lambda: lib.get_constraints(self.cib.tree),
+            (
+                severities.ERROR,
+                report_codes.CIB_CANNOT_FIND_MANDATORY_SECTION,
+                {
+                    "section": "configuration/constraints",
+                }
+            ),
+        )
+
+
+class GetAclsTest(CibToolsTest):
+    def setUp(self):
+        self.create_cib = get_xml_manipulation_creator_from_file(rc("cib-empty-1.2.xml"))
+        self.cib = self.create_cib()
+
+    def test_success_if_exists(self):
+        self.cib.append_to_first_tag_name(
+            "configuration",
+            '<acls><acl_role id="test_role" /></acls>'
+        )
+        self.assertEqual(
+            "test_role",
+            lib.get_acls(self.cib.tree)[0].get("id")
+        )
+
+    def test_success_if_missing(self):
+        acls = lib.get_acls(self.cib.tree)
+        self.assertEqual("acls", acls.tag)
+        self.assertEqual("configuration", acls.getparent().tag)
+
+ at mock.patch('pcs.lib.cib.tools.does_id_exist')
+class ValidateIdDoesNotExistsTest(TestCase):
+    def test_success_when_id_does_not_exist(self, does_id_exists):
+        does_id_exists.return_value = False
+        lib.validate_id_does_not_exist("tree", "some-id")
+        does_id_exists.assert_called_once_with("tree", "some-id")
+
+    def test_raises_when_id_exists(self, does_id_exists):
+        does_id_exists.return_value = True
+        assert_raise_library_error(
+            lambda: lib.validate_id_does_not_exist("tree", "some-id"),
+            (
+                severities.ERROR,
+                report_codes.ID_ALREADY_EXISTS,
+                {"id": "some-id"},
+            ),
+        )
+        does_id_exists.assert_called_once_with("tree", "some-id")
diff --git a/pcs/test/test_lib_commands_quorum.py b/pcs/test/test_lib_commands_quorum.py
new file mode 100644
index 0000000..172f895
--- /dev/null
+++ b/pcs/test/test_lib_commands_quorum.py
@@ -0,0 +1,671 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import logging
+from unittest import TestCase
+
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_report_item_list_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.misc import (
+    ac,
+    get_test_resource as rc,
+)
+from pcs.test.tools.pcs_mock import mock
+
+from pcs.common import report_codes
+from pcs.lib.env import LibraryEnvironment
+from pcs.lib.errors import ReportItemSeverity as severity
+
+from pcs.lib.commands import quorum as lib
+
+
+class CmanMixin(object):
+    def assert_disabled_on_cman(self, func):
+        assert_raise_library_error(
+            func,
+            (
+                severity.ERROR,
+                report_codes.CMAN_UNSUPPORTED_COMMAND,
+                {}
+            )
+        )
+
+
+ at mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
+class GetQuorumConfigTest(TestCase, CmanMixin):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+    def test_disabled_on_cman(self, mock_get_corosync):
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.assert_disabled_on_cman(lambda: lib.get_config(lib_env))
+        mock_get_corosync.assert_not_called()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_enabled_on_cman_if_not_live(self, mock_get_corosync):
+        original_conf = open(rc("corosync.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(
+            self.mock_logger,
+            self.mock_reporter,
+            corosync_conf_data=original_conf
+        )
+
+        self.assertEqual(
+            {
+                "options": {},
+                "device": None,
+            },
+            lib.get_config(lib_env)
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_no_options(self, mock_get_corosync):
+        original_conf = open(rc("corosync.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        self.assertEqual(
+            {
+                "options": {},
+                "device": None,
+            },
+            lib.get_config(lib_env)
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_options(self, mock_get_corosync):
+        original_conf = "quorum {\nwait_for_all: 1\n}\n"
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        self.assertEqual(
+            {
+                "options": {
+                    "wait_for_all": "1",
+                },
+                "device": None,
+            },
+            lib.get_config(lib_env)
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_device(self, mock_get_corosync):
+        original_conf = """\
+            quorum {
+                provider: corosync_votequorum
+                wait_for_all: 1
+                device {
+                    option: value
+                    model: net
+                    net {
+                        host: 127.0.0.1
+                        port: 4433
+                    }
+                }
+            }
+        """
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        self.assertEqual(
+            {
+                "options": {
+                    "wait_for_all": "1",
+                },
+                "device": {
+                    "model": "net",
+                    "model_options": {
+                        "host": "127.0.0.1",
+                        "port": "4433",
+                    },
+                    "generic_options": {
+                        "option": "value",
+                    },
+                },
+            },
+            lib.get_config(lib_env)
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+
+ at mock.patch.object(LibraryEnvironment, "push_corosync_conf")
+ at mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
+class SetQuorumOptionsTest(TestCase, CmanMixin):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+    def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync):
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.assert_disabled_on_cman(lambda: lib.set_options(lib_env, {}))
+        mock_get_corosync.assert_not_called()
+        mock_push_corosync.assert_not_called()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+    def test_enabled_on_cman_if_not_live(
+        self, mock_get_corosync, mock_push_corosync
+    ):
+        original_conf = "invalid {\nconfig: stop after cman test"
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(
+            self.mock_logger,
+            self.mock_reporter,
+            corosync_conf_data=original_conf
+        )
+        options = {"wait_for_all": "1"}
+        assert_raise_library_error(
+            lambda: lib.set_options(lib_env, options),
+            (
+                severity.ERROR,
+                report_codes.PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE,
+                {}
+            )
+        )
+
+        mock_push_corosync.assert_not_called()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_success(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        new_options = {"wait_for_all": "1"}
+        lib.set_options(lib_env, new_options)
+
+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
+        ac(
+            mock_push_corosync.mock_calls[0][1][0].config.export(),
+            original_conf.replace(
+                "provider: corosync_votequorum\n",
+                "provider: corosync_votequorum\n    wait_for_all: 1\n"
+            )
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_bad_options(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        new_options = {"invalid": "option"}
+        assert_raise_library_error(
+            lambda: lib.set_options(lib_env, new_options),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "invalid",
+                    "option_type": "quorum",
+                    "allowed": [
+                        "auto_tie_breaker",
+                        "last_man_standing",
+                        "last_man_standing_window",
+                        "wait_for_all",
+                    ],
+                }
+            )
+        )
+
+        mock_push_corosync.assert_not_called()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_bad_config(self, mock_get_corosync, mock_push_corosync):
+        original_conf = "invalid {\nconfig: this is"
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        new_options = {"wait_for_all": "1"}
+        assert_raise_library_error(
+            lambda: lib.set_options(lib_env, new_options),
+            (
+                severity.ERROR,
+                report_codes.PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE,
+                {}
+            )
+        )
+
+        mock_push_corosync.assert_not_called()
+
+
+ at mock.patch.object(LibraryEnvironment, "push_corosync_conf")
+ at mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
+class AddDeviceTest(TestCase, CmanMixin):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+    def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync):
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.assert_disabled_on_cman(
+            lambda: lib.add_device(lib_env, "net", {"host": "127.0.0.1"}, {})
+        )
+        mock_get_corosync.assert_not_called()
+        mock_push_corosync.assert_not_called()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+    def test_enabled_on_cman_if_not_live(
+        self, mock_get_corosync, mock_push_corosync
+    ):
+        original_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(
+            self.mock_logger,
+            self.mock_reporter,
+            corosync_conf_data=original_conf
+        )
+
+        assert_raise_library_error(
+            lambda: lib.add_device(lib_env, "bad model", {}, {}),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "model",
+                    "option_value": "bad model",
+                    "allowed_values": ("net", ),
+                },
+                report_codes.FORCE_QDEVICE_MODEL
+            )
+        )
+
+        self.assertEqual(1, mock_get_corosync.call_count)
+        self.assertEqual(0, mock_push_corosync.call_count)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_success(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        lib.add_device(
+            lib_env,
+            "net",
+            {"host": "127.0.0.1"},
+            {"timeout": "12345"}
+        )
+
+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
+        ac(
+            mock_push_corosync.mock_calls[0][1][0].config.export(),
+            original_conf.replace(
+                "provider: corosync_votequorum\n",
+                """provider: corosync_votequorum
+
+    device {
+        timeout: 12345
+        model: net
+
+        net {
+            host: 127.0.0.1
+        }
+    }
+"""
+            )
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_invalid_options(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        assert_raise_library_error(
+            lambda: lib.add_device(
+                lib_env,
+                "net",
+                {"host": "127.0.0.1", },
+                {"bad_option": "bad_value", }
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "bad_option",
+                    "option_type": "quorum device",
+                    "allowed": ["sync_timeout", "timeout"],
+                },
+                report_codes.FORCE_OPTIONS
+            )
+        )
+
+        self.assertEqual(1, mock_get_corosync.call_count)
+        self.assertEqual(0, mock_push_corosync.call_count)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_invalid_options_forced(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        lib.add_device(
+            lib_env,
+            "net",
+            {"host": "127.0.0.1", },
+            {"bad_option": "bad_value", },
+            force_options=True
+        )
+
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "bad_option",
+                        "option_type": "quorum device",
+                        "allowed": ["sync_timeout", "timeout"],
+                    }
+                )
+            ]
+        )
+        self.assertEqual(1, mock_get_corosync.call_count)
+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
+        ac(
+            mock_push_corosync.mock_calls[0][1][0].config.export(),
+            original_conf.replace(
+                "provider: corosync_votequorum\n",
+                """provider: corosync_votequorum
+
+    device {
+        bad_option: bad_value
+        model: net
+
+        net {
+            host: 127.0.0.1
+        }
+    }
+"""
+            )
+        )
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_invalid_model(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        assert_raise_library_error(
+            lambda: lib.add_device(lib_env, "bad model", {}, {}),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "model",
+                    "option_value": "bad model",
+                    "allowed_values": ("net", ),
+                },
+                report_codes.FORCE_QDEVICE_MODEL
+            )
+        )
+
+        self.assertEqual(1, mock_get_corosync.call_count)
+        self.assertEqual(0, mock_push_corosync.call_count)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_invalid_model_forced(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        lib.add_device(lib_env, "bad model", {}, {}, force_model=True)
+
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "model",
+                        "option_value": "bad model",
+                        "allowed_values": ("net", ),
+                    },
+                )
+            ]
+        )
+        self.assertEqual(1, mock_get_corosync.call_count)
+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
+        ac(
+            mock_push_corosync.mock_calls[0][1][0].config.export(),
+            original_conf.replace(
+                "provider: corosync_votequorum\n",
+                """provider: corosync_votequorum
+
+    device {
+        model: bad model
+    }
+"""
+            )
+        )
+
+
+ at mock.patch.object(LibraryEnvironment, "push_corosync_conf")
+ at mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
+class RemoveDeviceTest(TestCase, CmanMixin):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+    def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync):
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.assert_disabled_on_cman(lambda: lib.remove_device(lib_env))
+        mock_get_corosync.assert_not_called()
+        mock_push_corosync.assert_not_called()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+    def test_enabled_on_cman_if_not_live(
+        self, mock_get_corosync, mock_push_corosync
+    ):
+        original_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(
+            self.mock_logger,
+            self.mock_reporter,
+            corosync_conf_data=original_conf
+        )
+
+        assert_raise_library_error(
+            lambda: lib.remove_device(lib_env),
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_NOT_DEFINED,
+                {}
+            )
+        )
+
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_no_device(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        assert_raise_library_error(
+            lambda: lib.remove_device(lib_env),
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_NOT_DEFINED,
+                {}
+            )
+        )
+
+        mock_push_corosync.assert_not_called()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_success(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
+        no_device_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        lib.remove_device(lib_env)
+
+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
+        ac(
+            mock_push_corosync.mock_calls[0][1][0].config.export(),
+            no_device_conf
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+
+ at mock.patch.object(LibraryEnvironment, "push_corosync_conf")
+ at mock.patch.object(LibraryEnvironment, "get_corosync_conf_data")
+class UpdateDeviceTest(TestCase, CmanMixin):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+    def test_disabled_on_cman(self, mock_get_corosync, mock_push_corosync):
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.assert_disabled_on_cman(
+            lambda: lib.update_device(lib_env, {"host": "127.0.0.1"}, {})
+        )
+        mock_get_corosync.assert_not_called()
+        mock_push_corosync.assert_not_called()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: True)
+    def test_enabled_on_cman_if_not_live(
+        self, mock_get_corosync, mock_push_corosync
+    ):
+        original_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(
+            self.mock_logger,
+            self.mock_reporter,
+            corosync_conf_data=original_conf
+        )
+
+        assert_raise_library_error(
+            lambda: lib.update_device(lib_env, {"host": "127.0.0.1"}, {}),
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_NOT_DEFINED,
+                {}
+            )
+        )
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_no_device(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        assert_raise_library_error(
+            lambda: lib.update_device(lib_env, {"host": "127.0.0.1"}, {}),
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_NOT_DEFINED,
+                {}
+            )
+        )
+
+        mock_push_corosync.assert_not_called()
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_success(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        lib.update_device(
+            lib_env,
+            {"host": "127.0.0.2"},
+            {"timeout": "12345"}
+        )
+
+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
+        ac(
+            mock_push_corosync.mock_calls[0][1][0].config.export(),
+            original_conf
+                .replace("host: 127.0.0.1", "host: 127.0.0.2")
+                .replace(
+                    "model: net",
+                    "model: net\n        timeout: 12345"
+                )
+        )
+        self.assertEqual([], self.mock_reporter.report_item_list)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_invalid_options(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        assert_raise_library_error(
+            lambda: lib.update_device(
+                lib_env,
+                {},
+                {"bad_option": "bad_value", }
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "bad_option",
+                    "option_type": "quorum device",
+                    "allowed": ["sync_timeout", "timeout"],
+                },
+                report_codes.FORCE_OPTIONS
+            )
+        )
+
+        self.assertEqual(1, mock_get_corosync.call_count)
+        self.assertEqual(0, mock_push_corosync.call_count)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster", lambda self: False)
+    def test_invalid_options_forced(self, mock_get_corosync, mock_push_corosync):
+        original_conf = open(rc("corosync-3nodes-qdevice.conf")).read()
+        mock_get_corosync.return_value = original_conf
+        lib_env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        lib.update_device(
+            lib_env,
+            {},
+            {"bad_option": "bad_value", },
+            force_options=True
+        )
+
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "bad_option",
+                        "option_type": "quorum device",
+                        "allowed": ["sync_timeout", "timeout"],
+                    }
+                )
+            ]
+        )
+        self.assertEqual(1, mock_get_corosync.call_count)
+        self.assertEqual(1, len(mock_push_corosync.mock_calls))
+        ac(
+            mock_push_corosync.mock_calls[0][1][0].config.export(),
+            original_conf.replace(
+                "model: net",
+                "model: net\n        bad_option: bad_value"
+            )
+        )
diff --git a/pcs/test/test_lib_corosync_config_facade.py b/pcs/test/test_lib_corosync_config_facade.py
new file mode 100644
index 0000000..3c18d65
--- /dev/null
+++ b/pcs/test/test_lib_corosync_config_facade.py
@@ -0,0 +1,2042 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+import re
+
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_report_item_list_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.misc import (
+    ac,
+    get_test_resource as rc,
+)
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severity
+
+import pcs.lib.corosync.config_facade as lib
+
+
+class FromStringTest(TestCase):
+    """Tests for ConfigFacade.from_string: parsing corosync.conf text."""
+    def test_success(self):
+        config = open(rc("corosync.conf")).read()
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(facade.__class__, lib.ConfigFacade)
+        # Export must round-trip the original text unchanged.
+        self.assertEqual(facade.config.export(), config)
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_parse_error_missing_brace(self):
+        # An opened but never closed section must raise a parse error.
+        config = "section {"
+        assert_raise_library_error(
+            lambda: lib.ConfigFacade.from_string(config),
+            (
+                severity.ERROR,
+                report_codes.PARSE_ERROR_COROSYNC_CONF_MISSING_CLOSING_BRACE,
+                {}
+            )
+        )
+
+    def test_parse_error_unexpected_brace(self):
+        # A closing brace with no matching opening brace must raise too.
+        config = "}"
+        assert_raise_library_error(
+            lambda: lib.ConfigFacade.from_string(config),
+            (
+                severity.ERROR,
+                report_codes.PARSE_ERROR_COROSYNC_CONF_UNEXPECTED_CLOSING_BRACE,
+                {}
+            )
+        )
+
+
+class GetNodesTest(TestCase):
+    """Tests for ConfigFacade.get_nodes: reading nodelist sections."""
+    def assert_equal_nodelist(self, expected_nodes, real_nodelist):
+        # Flatten node objects into plain dicts so plain assertEqual works.
+        real_nodes = [
+            {"ring0": n.ring0, "ring1": n.ring1, "label": n.label, "id": n.id}
+            for n in real_nodelist
+        ]
+        self.assertEqual(expected_nodes, real_nodes)
+
+    def test_no_nodelist(self):
+        config = ""
+        facade = lib.ConfigFacade.from_string(config)
+        nodes = facade.get_nodes()
+        self.assertEqual(0, len(nodes))
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_empty_nodelist(self):
+        config = """\
+nodelist {
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        nodes = facade.get_nodes()
+        self.assertEqual(0, len(nodes))
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_one_nodelist(self):
+        # Without a name, the node's label falls back to its ring0 address.
+        config = """\
+nodelist {
+    node {
+        ring0_addr: n1a
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: n2a
+        ring1_addr: n2b
+        name: n2n
+        nodeid: 2
+    }
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        nodes = facade.get_nodes()
+        self.assertEqual(2, len(nodes))
+        self.assert_equal_nodelist(
+            [
+                {"ring0": "n1a", "ring1": None, "label": "n1a", "id": "1"},
+                {"ring0": "n2a", "ring1": "n2b", "label": "n2n", "id": "2"},
+            ],
+            nodes
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_more_nodelists(self):
+        # Nodes from several nodelist sections are merged into one list.
+        config = """\
+nodelist {
+    node {
+        ring0_addr: n1a
+        nodeid: 1
+    }
+}
+
+nodelist {
+    node {
+        ring0_addr: n2a
+        ring1_addr: n2b
+        name: n2n
+        nodeid: 2
+    }
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        nodes = facade.get_nodes()
+        self.assertEqual(2, len(nodes))
+        self.assert_equal_nodelist(
+            [
+                {"ring0": "n1a", "ring1": None, "label": "n1a", "id": "1"},
+                {"ring0": "n2a", "ring1": "n2b", "label": "n2n", "id": "2"},
+            ],
+            nodes
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+
+class GetQuorumOptionsTest(TestCase):
+    """Tests for ConfigFacade.get_quorum_options: only the known quorum
+    options are returned; unknown keys and 'provider' are filtered out.
+    """
+    def test_no_quorum(self):
+        config = ""
+        facade = lib.ConfigFacade.from_string(config)
+        options = facade.get_quorum_options()
+        self.assertEqual({}, options)
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_empty_quorum(self):
+        config = """\
+quorum {
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        options = facade.get_quorum_options()
+        self.assertEqual({}, options)
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_no_options(self):
+        # 'provider' is not a quorum option and must not be reported.
+        config = """\
+quorum {
+    provider: corosync_votequorum
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        options = facade.get_quorum_options()
+        self.assertEqual({}, options)
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_some_options(self):
+        # Unknown keys ('nonsense') are silently skipped.
+        config = """\
+quorum {
+    provider: corosync_votequorum
+    wait_for_all: 0
+    nonsense: ignored
+    auto_tie_breaker: 1
+    last_man_standing: 0
+    last_man_standing_window: 1000
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        options = facade.get_quorum_options()
+        self.assertEqual(
+            {
+                "auto_tie_breaker": "1",
+                "last_man_standing": "0",
+                "last_man_standing_window": "1000",
+                "wait_for_all": "0",
+            },
+            options
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_option_repeated(self):
+        # When an option appears more than once, the last value wins.
+        config = """\
+quorum {
+    wait_for_all: 0
+    wait_for_all: 1
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        options = facade.get_quorum_options()
+        self.assertEqual(
+            {
+                "wait_for_all": "1",
+            },
+            options
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_quorum_repeated(self):
+        # Options from repeated quorum sections are merged; later sections
+        # override earlier ones.
+        config = """\
+quorum {
+    wait_for_all: 0
+    last_man_standing: 0
+}
+quorum {
+    last_man_standing_window: 1000
+    wait_for_all: 1
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        options = facade.get_quorum_options()
+        self.assertEqual(
+            {
+                "last_man_standing": "0",
+                "last_man_standing_window": "1000",
+                "wait_for_all": "1",
+            },
+            options
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+
+class SetQuorumOptionsTest(TestCase):
+    """Tests for ConfigFacade.set_quorum_options: validation, section
+    creation/removal, and the auto_tie_breaker / two_node interaction.
+    Any successful change flags need_stopped_cluster.
+    """
+    def get_two_node(self, facade):
+        # Return the last two_node value found in any quorum section,
+        # or None when it is not set at all.
+        two_node = None
+        for quorum in facade.config.get_sections("quorum"):
+            for dummy_name, value in quorum.get_attributes("two_node"):
+                two_node = value
+        return two_node
+
+    def test_add_missing_section(self):
+        # Setting an option in an empty config creates the quorum section.
+        config = ""
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.set_quorum_options(reporter, {"wait_for_all": "0"})
+        self.assertTrue(facade.need_stopped_cluster)
+        self.assertEqual(
+            """\
+quorum {
+    wait_for_all: 0
+}
+""",
+            facade.config.export()
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_del_missing_section(self):
+        # Deleting (empty value) an option that does not exist is a no-op
+        # on the exported config.
+        config = ""
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.set_quorum_options(reporter, {"wait_for_all": ""})
+        self.assertTrue(facade.need_stopped_cluster)
+        self.assertEqual("", facade.config.export())
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_add_all_options(self):
+        config = open(rc("corosync.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        expected_options = {
+            "auto_tie_breaker": "1",
+            "last_man_standing": "0",
+            "last_man_standing_window": "1000",
+            "wait_for_all": "0",
+        }
+        facade.set_quorum_options(reporter, expected_options)
+
+        self.assertTrue(facade.need_stopped_cluster)
+        # Re-parse the export to verify the options survive a round trip.
+        test_facade = lib.ConfigFacade.from_string(facade.config.export())
+        self.assertEqual(
+            expected_options,
+            test_facade.get_quorum_options()
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_complex(self):
+        # Mix of add (auto_tie_breaker), update (wait_for_all) and delete
+        # (last_man_standing_window) across repeated quorum sections.
+        config = """\
+quorum {
+    wait_for_all: 0
+    last_man_standing_window: 1000
+}
+quorum {
+    wait_for_all: 0
+    last_man_standing: 1
+}
+"""
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.set_quorum_options(
+            reporter,
+            {
+                "auto_tie_breaker": "1",
+                "wait_for_all": "1",
+                "last_man_standing_window": "",
+            }
+        )
+
+        self.assertTrue(facade.need_stopped_cluster)
+        test_facade = lib.ConfigFacade.from_string(facade.config.export())
+        self.assertEqual(
+            {
+                "auto_tie_breaker": "1",
+                "last_man_standing": "1",
+                "wait_for_all": "1",
+            },
+            test_facade.get_quorum_options()
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_2nodes_atb_on(self):
+        # Enabling auto_tie_breaker on a 2-node cluster must clear two_node.
+        config = open(rc("corosync.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(2, len(facade.get_nodes()))
+
+        facade.set_quorum_options(reporter, {"auto_tie_breaker": "1"})
+
+        self.assertTrue(facade.need_stopped_cluster)
+        self.assertEqual(
+            "1",
+            facade.get_quorum_options().get("auto_tie_breaker", None)
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+        two_node = self.get_two_node(facade)
+        self.assertTrue(two_node is None or two_node == "0")
+
+    def test_2nodes_atb_off(self):
+        # Disabling auto_tie_breaker on a 2-node cluster restores two_node.
+        config = open(rc("corosync.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(2, len(facade.get_nodes()))
+
+        facade.set_quorum_options(reporter, {"auto_tie_breaker": "0"})
+
+        self.assertTrue(facade.need_stopped_cluster)
+        self.assertEqual(
+            "0",
+            facade.get_quorum_options().get("auto_tie_breaker", None)
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+        two_node = self.get_two_node(facade)
+        self.assertTrue(two_node == "1")
+
+    def test_3nodes_atb_on(self):
+        # With 3 nodes, two_node must stay unset regardless of atb.
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(3, len(facade.get_nodes()))
+
+        facade.set_quorum_options(reporter, {"auto_tie_breaker": "1"})
+
+        self.assertTrue(facade.need_stopped_cluster)
+        self.assertEqual(
+            "1",
+            facade.get_quorum_options().get("auto_tie_breaker", None)
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+        two_node = self.get_two_node(facade)
+        self.assertTrue(two_node is None or two_node == "0")
+
+    def test_3nodes_atb_off(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(3, len(facade.get_nodes()))
+
+        facade.set_quorum_options(reporter, {"auto_tie_breaker": "0"})
+
+        self.assertTrue(facade.need_stopped_cluster)
+        self.assertEqual(
+            "0",
+            facade.get_quorum_options().get("auto_tie_breaker", None)
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+        two_node = self.get_two_node(facade)
+        self.assertTrue(two_node is None or two_node == "0")
+
+    def test_invalid_value_no_effect_on_config(self):
+        # A single invalid value must abort the whole change: the config
+        # stays as it was and need_stopped_cluster remains False.
+        config= """\
+quorum {
+    auto_tie_breaker: 1
+    wait_for_all: 1
+    last_man_standing: 1
+}
+"""
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        options = {
+            "auto_tie_breaker": "",
+            "wait_for_all": "nonsense",
+            "last_man_standing": "0",
+            "last_man_standing_window": "250",
+        }
+        assert_raise_library_error(
+            lambda: facade.set_quorum_options(reporter, options),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "wait_for_all",
+                    "option_value": "nonsense",
+                    "allowed_values": ("0", "1"),
+                }
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertEqual(
+            lib.ConfigFacade.from_string(config).get_quorum_options(),
+            facade.get_quorum_options()
+        )
+
+    def test_invalid_all_values(self):
+        # Every invalid value is reported, not only the first one found.
+        config= ""
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        options = {
+            "auto_tie_breaker": "atb",
+            "last_man_standing": "lms",
+            "last_man_standing_window": "lmsw",
+            "wait_for_all": "wfa",
+        }
+        assert_raise_library_error(
+            lambda: facade.set_quorum_options(reporter, options),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "auto_tie_breaker",
+                    "option_value": "atb",
+                    "allowed_values": ("0", "1"),
+                }
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "last_man_standing",
+                    "option_value": "lms",
+                    "allowed_values": ("0", "1"),
+                }
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "last_man_standing_window",
+                    "option_value": "lmsw",
+                    "allowed_values": "positive integer",
+                }
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "wait_for_all",
+                    "option_value": "wfa",
+                    "allowed_values": ("0", "1"),
+                }
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertEqual(
+            lib.ConfigFacade.from_string(config).get_quorum_options(),
+            facade.get_quorum_options()
+        )
+
+    def test_invalid_option(self):
+        # Unknown option names are rejected, each with the allowed list.
+        config= ""
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        options = {
+            "auto_tie_breaker": "1",
+            "nonsense1": "0",
+            "nonsense2": "doesnt matter",
+        }
+        assert_raise_library_error(
+            lambda: facade.set_quorum_options(reporter, options),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "nonsense1",
+                    "option_type": "quorum",
+                    "allowed": [
+                        "auto_tie_breaker",
+                        "last_man_standing",
+                        "last_man_standing_window",
+                        "wait_for_all"
+                    ],
+                }
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "nonsense2",
+                    "option_type": "quorum",
+                    "allowed": [
+                        "auto_tie_breaker",
+                        "last_man_standing",
+                        "last_man_standing_window",
+                        "wait_for_all"
+                    ],
+                }
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertEqual(
+            lib.ConfigFacade.from_string(config).get_quorum_options(),
+            facade.get_quorum_options()
+        )
+
+
+class HasQuorumDeviceTest(TestCase):
+    """Tests for ConfigFacade.has_quorum_device: a device counts as
+    defined only when its 'model' attribute is set.
+    """
+    def test_empty_config(self):
+        config = ""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertFalse(facade.has_quorum_device())
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_no_device(self):
+        config = open(rc("corosync.conf")).read()
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertFalse(facade.has_quorum_device())
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_empty_device(self):
+        # A device section without any content does not count.
+        config = """\
+quorum {
+    device {
+    }
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertFalse(facade.has_quorum_device())
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_device_set(self):
+        config = """\
+quorum {
+    device {
+        model: net
+    }
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertTrue(facade.has_quorum_device())
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_no_model(self):
+        # Options and a model subsection alone are not enough; the 'model'
+        # attribute itself must be present.
+        config = """\
+quorum {
+    device {
+        option: value
+        net {
+            host: 127.0.0.1
+        }
+    }
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertFalse(facade.has_quorum_device())
+        self.assertFalse(facade.need_stopped_cluster)
+
+
+class GetQuorumDeviceSettingsTest(TestCase):
+    """Tests for ConfigFacade.get_quorum_device_settings, which returns a
+    (model, model_options, generic_options) triple.
+    """
+    def test_empty_config(self):
+        config = ""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(
+            (None, {}, {}),
+            facade.get_quorum_device_settings()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_no_device(self):
+        config = open(rc("corosync.conf")).read()
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(
+            (None, {}, {}),
+            facade.get_quorum_device_settings()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_empty_device(self):
+        config = """\
+quorum {
+    device {
+    }
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(
+            (None, {}, {}),
+            facade.get_quorum_device_settings()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_no_model(self):
+        # Without a model, generic options are still reported but model
+        # options are not (there is no model to attribute them to).
+        config = """\
+quorum {
+    device {
+        option: value
+        net {
+            host: 127.0.0.1
+        }
+    }
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(
+            (None, {}, {"option": "value"}),
+            facade.get_quorum_device_settings()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_configured_properly(self):
+        config = """\
+quorum {
+    device {
+        option: value
+        model: net
+        net {
+            host: 127.0.0.1
+        }
+    }
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(
+            ("net", {"host": "127.0.0.1"}, {"option": "value"}),
+            facade.get_quorum_device_settings()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_more_devices_one_quorum(self):
+        # With several device sections, options are merged (later wins on
+        # conflict) and the model options come from the last model's section.
+        config = """\
+quorum {
+    device {
+        option0: valueX
+        option1: value1
+        model: disk
+        net {
+            host: 127.0.0.1
+        }
+    }
+    device {
+        option0: valueY
+        option2: value2
+        model: net
+        disk {
+            path: /dev/quorum_disk
+        }
+    }
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(
+            (
+                "net",
+                {"host": "127.0.0.1"},
+                {"option0": "valueY", "option1": "value1", "option2": "value2"}
+            ),
+            facade.get_quorum_device_settings()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_more_devices_more_quorum(self):
+        # Same merging rules apply when the devices live in separate
+        # quorum sections.
+        config = """\
+quorum {
+    device {
+        option0: valueX
+        option1: value1
+        model: disk
+        net {
+            host: 127.0.0.1
+        }
+    }
+}
+quorum {
+    device {
+        option0: valueY
+        option2: value2
+        model: net
+        disk {
+            path: /dev/quorum_disk
+        }
+    }
+}
+"""
+        facade = lib.ConfigFacade.from_string(config)
+        self.assertEqual(
+            (
+                "net",
+                {"host": "127.0.0.1"},
+                {"option0": "valueY", "option1": "value1", "option2": "value2"}
+            ),
+            facade.get_quorum_device_settings()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+
+class AddQuorumDeviceTest(TestCase):
+    def test_already_exists(self):
+        config = """\
+totem {
+    version: 2
+}
+
+quorum {
+    provider: corosync_votequorum
+
+    device {
+        option: value
+        model: net
+
+        net {
+            host: 127.0.0.1
+        }
+    }
+}
+"""
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.add_quorum_device(
+                reporter,
+                "net",
+                {"host": "127.0.0.1"},
+                {}
+            ),
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_ALREADY_DEFINED,
+                {},
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_success_net_minimal(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.add_quorum_device(
+            reporter,
+            "net",
+            {"host": "127.0.0.1"},
+            {}
+        )
+        ac(
+            config.replace(
+                "    provider: corosync_votequorum",
+                """\
+    provider: corosync_votequorum
+
+    device {
+        model: net
+
+        net {
+            host: 127.0.0.1
+        }
+    }"""
+            ),
+            facade.config.export()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_success_net_full(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.add_quorum_device(
+            reporter,
+            "net",
+            {
+                "host": "127.0.0.1",
+                "port": "4433",
+                "algorithm": "ffsplit",
+                "connect_timeout": "12345",
+                "force_ip_version": "4",
+                "tie_breaker": "lowest",
+            },
+            {
+                "timeout": "23456",
+                "sync_timeout": "34567"
+            }
+        )
+        ac(
+            config.replace(
+                "    provider: corosync_votequorum",
+                """\
+    provider: corosync_votequorum
+
+    device {
+        sync_timeout: 34567
+        timeout: 23456
+        model: net
+
+        net {
+            algorithm: ffsplit
+            connect_timeout: 12345
+            force_ip_version: 4
+            host: 127.0.0.1
+            port: 4433
+            tie_breaker: lowest
+        }
+    }"""
+            ),
+            facade.config.export()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_succes_net_lms_3node(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.add_quorum_device(
+            reporter,
+            "net",
+            {"host": "127.0.0.1", "algorithm": "lms"},
+            {}
+        )
+        ac(
+            config.replace(
+                "    provider: corosync_votequorum",
+                """\
+    provider: corosync_votequorum
+
+    device {
+        model: net
+
+        net {
+            algorithm: lms
+            host: 127.0.0.1
+        }
+    }"""
+            ),
+            facade.config.export()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_succes_net_2nodelms_3node(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.add_quorum_device(
+            reporter,
+            "net",
+            {"host": "127.0.0.1", "algorithm": "2nodelms"},
+            {}
+        )
+        ac(
+            config.replace(
+                "    provider: corosync_votequorum",
+                """\
+    provider: corosync_votequorum
+
+    device {
+        model: net
+
+        net {
+            algorithm: lms
+            host: 127.0.0.1
+        }
+    }"""
+            ),
+            facade.config.export()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_succes_net_lms_2node(self):
+        config = open(rc("corosync.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.add_quorum_device(
+            reporter,
+            "net",
+            {"host": "127.0.0.1", "algorithm": "lms"},
+            {}
+        )
+        ac(
+            config.replace(
+                "    provider: corosync_votequorum",
+                """\
+    provider: corosync_votequorum
+
+    device {
+        model: net
+
+        net {
+            algorithm: 2nodelms
+            host: 127.0.0.1
+        }
+    }"""
+            ).replace("    two_node: 1\n", ""),
+            facade.config.export()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_succes_net_2nodelms_2node(self):
+        config = open(rc("corosync.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.add_quorum_device(
+            reporter,
+            "net",
+            {"host": "127.0.0.1", "algorithm": "2nodelms"},
+            {}
+        )
+        ac(
+            config.replace(
+                "    provider: corosync_votequorum",
+                """\
+    provider: corosync_votequorum
+
+    device {
+        model: net
+
+        net {
+            algorithm: 2nodelms
+            host: 127.0.0.1
+        }
+    }"""
+            ).replace("    two_node: 1\n", ""),
+            facade.config.export()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_remove_conflicting_options(self):
+        config = open(rc("corosync.conf")).read()
+        config = config.replace(
+            "    two_node: 1\n",
+            "\n".join([
+                "    two_node: 1",
+                "    auto_tie_breaker: 1",
+                "    last_man_standing: 1",
+                "    last_man_standing_window: 987",
+                "    allow_downscale: 1",
+                ""
+            ])
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.add_quorum_device(
+            reporter,
+            "net",
+            {"host": "127.0.0.1"},
+            {}
+        )
+        ac(
+            re.sub(
+                re.compile(r"quorum {[^}]*}", re.MULTILINE | re.DOTALL),
+                """\
+quorum {
+    provider: corosync_votequorum
+
+    device {
+        model: net
+
+        net {
+            host: 127.0.0.1
+        }
+    }
+}""",
+                config
+            ),
+            facade.config.export()
+        )
+        self.assertTrue(facade.need_stopped_cluster)
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_remove_old_configuration(self):
+        config = """\
+quorum {
+    provider: corosync_votequorum
+    device {
+        option: value_old1
+    }
+}
+quorum {
+    provider: corosync_votequorum
+    device {
+        option: value_old2
+    }
+}
+        """
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.add_quorum_device(
+            reporter,
+            "net",
+            {"host": "127.0.0.1"},
+            {}
+        )
+        ac(
+            """\
+quorum {
+    provider: corosync_votequorum
+}
+
+quorum {
+    provider: corosync_votequorum
+
+    device {
+        model: net
+
+        net {
+            host: 127.0.0.1
+        }
+    }
+}
+"""
+            ,
+            facade.config.export()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_bad_model(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.add_quorum_device(reporter, "invalid", {}, {}),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "model",
+                    "option_value": "invalid",
+                    "allowed_values": ("net", ),
+                },
+                report_codes.FORCE_QDEVICE_MODEL
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_bad_model_forced(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.add_quorum_device(reporter, "invalid", {}, {}, force_model=True)
+        ac(
+            config.replace(
+                "    provider: corosync_votequorum",
+                """\
+    provider: corosync_votequorum
+
+    device {
+        model: invalid
+    }"""
+            ),
+            facade.config.export()
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        assert_report_item_list_equal(
+            reporter.report_item_list,
+            [
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "model",
+                        "option_value": "invalid",
+                        "allowed_values": ("net", ),
+                    },
+                )
+            ]
+        )
+
+    def test_missing_required_options_net(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.add_quorum_device(reporter, "net", {}, {}),
+            (
+                severity.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "host"}
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_bad_options_net(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.add_quorum_device(
+                reporter,
+                "net",
+                {
+                    "host": "",
+                    "port": "65537",
+                    "algorithm": "bad algorithm",
+                    "connect_timeout": "-1",
+                    "force_ip_version": "3",
+                    "tie_breaker": "125",
+                    "bad_model_option": "bad model value",
+                },
+                {
+                    "timeout": "-2",
+                    "sync_timeout": "-3",
+                    "bad_generic_option": "bad generic value",
+                    "model": "some model",
+                }
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "algorithm",
+                    "option_value": "bad algorithm",
+                    "allowed_values": ("2nodelms", "ffsplit", "lms"),
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "bad_model_option",
+                    "option_type": "quorum device model",
+                    "allowed": [
+                        "algorithm",
+                        "connect_timeout",
+                        "force_ip_version",
+                        "host",
+                        "port",
+                        "tie_breaker",
+                    ],
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "connect_timeout",
+                    "option_value": "-1",
+                    "allowed_values": "1000-120000",
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "force_ip_version",
+                    "option_value": "3",
+                    "allowed_values": ("0", "4", "6"),
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "host"}
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "port",
+                    "option_value": "65537",
+                    "allowed_values": "1-65535",
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "tie_breaker",
+                    "option_value": "125",
+                    "allowed_values": ["lowest", "highest", "valid node id"],
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "bad_generic_option",
+                    "option_type": "quorum device",
+                    "allowed": ["sync_timeout", "timeout"],
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "model",
+                    "option_type": "quorum device",
+                    "allowed": ["sync_timeout", "timeout"],
+                }
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "sync_timeout",
+                    "option_value": "-3",
+                    "allowed_values": "positive integer",
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "timeout",
+                    "option_value": "-2",
+                    "allowed_values": "positive integer",
+                },
+                report_codes.FORCE_OPTIONS
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_mandatory_options_missing_net_forced(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.add_quorum_device(
+                reporter, "net", {}, {},
+                force_model=True, force_options=True
+            ),
+            (
+                severity.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "host"}
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_mandatory_options_empty_net_forced(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.add_quorum_device(
+                reporter, "net", {"host": ""}, {},
+                force_model=True, force_options=True
+            ),
+            (
+                severity.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "host"}
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_bad_options_net_forced(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.add_quorum_device(
+            reporter,
+            "net",
+            {
+                "host": "127.0.0.1",
+                "port": "65537",
+                "algorithm": "bad algorithm",
+                "connect_timeout": "-1",
+                "force_ip_version": "3",
+                "tie_breaker": "125",
+                "bad_model_option": "bad model value",
+            },
+            {
+                "timeout": "-2",
+                "sync_timeout": "-3",
+                "bad_generic_option": "bad generic value",
+            },
+            force_options=True
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(
+            config.replace(
+                "    provider: corosync_votequorum",
+                """\
+    provider: corosync_votequorum
+
+    device {
+        bad_generic_option: bad generic value
+        sync_timeout: -3
+        timeout: -2
+        model: net
+
+        net {
+            algorithm: bad algorithm
+            bad_model_option: bad model value
+            connect_timeout: -1
+            force_ip_version: 3
+            host: 127.0.0.1
+            port: 65537
+            tie_breaker: 125
+        }
+    }"""
+            ),
+            facade.config.export()
+        )
+        assert_report_item_list_equal(
+            reporter.report_item_list,
+            [
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "algorithm",
+                        "option_value": "bad algorithm",
+                        "allowed_values": ("2nodelms", "ffsplit", "lms"),
+                    }
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "bad_model_option",
+                        "option_type": "quorum device model",
+                        "allowed": [
+                            "algorithm",
+                            "connect_timeout",
+                            "force_ip_version",
+                            "host",
+                            "port",
+                            "tie_breaker",
+                        ],
+                    }
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "connect_timeout",
+                        "option_value": "-1",
+                        "allowed_values": "1000-120000",
+                    }
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "force_ip_version",
+                        "option_value": "3",
+                        "allowed_values": ("0", "4", "6"),
+                    }
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "port",
+                        "option_value": "65537",
+                        "allowed_values": "1-65535",
+                    }
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "tie_breaker",
+                        "option_value": "125",
+                        "allowed_values": ["lowest", "highest", "valid node id"],
+                    }
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "bad_generic_option",
+                        "option_type": "quorum device",
+                        "allowed": ["sync_timeout", "timeout"],
+                    }
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "sync_timeout",
+                        "option_value": "-3",
+                        "allowed_values": "positive integer",
+                    }
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "timeout",
+                        "option_value": "-2",
+                        "allowed_values": "positive integer",
+                    }
+                )
+            ]
+        )
+
+class UpdateQuorumDeviceTest(TestCase):
+    def fixture_add_device(self, config):
+        return re.sub(
+            re.compile(r"quorum {[^}]*}", re.MULTILINE | re.DOTALL),
+            """\
+quorum {
+    provider: corosync_votequorum
+
+    device {
+        timeout: 12345
+        model: net
+
+        net {
+            host: 127.0.0.1
+            port: 4433
+        }
+    }
+}""",
+            config
+        )
+
+    def test_not_existing(self):
+        config = open(rc("corosync.conf")).read()
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.update_quorum_device(
+                reporter,
+                {"host": "127.0.0.1"},
+                {}
+            ),
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_NOT_DEFINED,
+                {}
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_success_model_options_net(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(
+            reporter,
+            {"host": "127.0.0.2", "port": "", "algorithm": "ffsplit"},
+            {}
+        )
+        self.assertTrue(facade.need_stopped_cluster)
+        ac(
+            config.replace(
+                "host: 127.0.0.1\n            port: 4433",
+                "host: 127.0.0.2\n            algorithm: ffsplit"
+            ),
+            facade.config.export()
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_success_net_3node_2nodelms(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(
+            reporter,
+            {"algorithm": "2nodelms"},
+            {}
+        )
+        self.assertTrue(facade.need_stopped_cluster)
+        ac(
+            config.replace(
+                "port: 4433",
+                "port: 4433\n            algorithm: lms"
+            ),
+            facade.config.export()
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_success_net_doesnt_require_host(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(reporter, {"port": "4444"}, {})
+        self.assertTrue(facade.need_stopped_cluster)
+        ac(
+            config.replace(
+                "host: 127.0.0.1\n            port: 4433",
+                "host: 127.0.0.1\n            port: 4444"
+            ),
+            facade.config.export()
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_net_host_cannot_be_removed(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.update_quorum_device(reporter, {"host": ""}, {}),
+            (
+                severity.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "host"},
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_net_host_cannot_be_removed_forced(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.update_quorum_device(
+                reporter, {"host": ""}, {}, force_options=True
+            ),
+            (
+                severity.ERROR,
+                report_codes.REQUIRED_OPTION_IS_MISSING,
+                {"option_name": "host"},
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_bad_net_options(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.update_quorum_device(
+                reporter,
+                {
+                    "port": "65537",
+                    "algorithm": "bad algorithm",
+                    "connect_timeout": "-1",
+                    "force_ip_version": "3",
+                    "tie_breaker": "125",
+                    "bad_model_option": "bad model value",
+                },
+                {}
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "algorithm",
+                    "option_value": "bad algorithm",
+                    "allowed_values": ("2nodelms", "ffsplit", "lms"),
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "bad_model_option",
+                    "option_type": "quorum device model",
+                    "allowed": [
+                        "algorithm",
+                        "connect_timeout",
+                        "force_ip_version",
+                        "host",
+                        "port",
+                        "tie_breaker",
+                    ],
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "connect_timeout",
+                    "option_value": "-1",
+                    "allowed_values": "1000-120000",
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "force_ip_version",
+                    "option_value": "3",
+                    "allowed_values": ("0", "4", "6"),
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "port",
+                    "option_value": "65537",
+                    "allowed_values": "1-65535",
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "tie_breaker",
+                    "option_value": "125",
+                    "allowed_values": ["lowest", "highest", "valid node id"],
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_bad_net_options_forced(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(
+            reporter,
+            {
+                "port": "65537",
+                "algorithm": "bad algorithm",
+                "connect_timeout": "-1",
+                "force_ip_version": "3",
+                "tie_breaker": "125",
+                "bad_model_option": "bad model value",
+            },
+            {},
+            force_options=True
+        )
+        self.assertTrue(facade.need_stopped_cluster)
+        ac(
+            config.replace(
+                "            host: 127.0.0.1\n            port: 4433",
+                """\
+            host: 127.0.0.1
+            port: 65537
+            algorithm: bad algorithm
+            bad_model_option: bad model value
+            connect_timeout: -1
+            force_ip_version: 3
+            tie_breaker: 125"""
+            ),
+            facade.config.export()
+        )
+        assert_report_item_list_equal(
+            reporter.report_item_list,
+            [
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "algorithm",
+                        "option_value": "bad algorithm",
+                        "allowed_values": ("2nodelms", "ffsplit", "lms"),
+                    },
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "bad_model_option",
+                        "option_type": "quorum device model",
+                        "allowed": [
+                            "algorithm",
+                            "connect_timeout",
+                            "force_ip_version",
+                            "host",
+                            "port",
+                            "tie_breaker",
+                        ],
+                    },
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "connect_timeout",
+                        "option_value": "-1",
+                        "allowed_values": "1000-120000",
+                    },
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "force_ip_version",
+                        "option_value": "3",
+                        "allowed_values": ("0", "4", "6"),
+                    },
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "port",
+                        "option_value": "65537",
+                        "allowed_values": "1-65535",
+                    },
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "tie_breaker",
+                        "option_value": "125",
+                        "allowed_values": ["lowest", "highest", "valid node id"],
+                    },
+                ),
+            ]
+        )
+
+    def test_success_generic_options(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(
+            reporter,
+            {},
+            {"timeout": "", "sync_timeout": "23456"}
+        )
+        self.assertTrue(facade.need_stopped_cluster)
+        ac(
+            config.replace(
+                "timeout: 12345\n        model: net",
+                "model: net\n        sync_timeout: 23456",
+            ),
+            facade.config.export()
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_success_both_options(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(
+            reporter,
+            {"port": "4444"},
+            {"timeout": "23456"}
+        )
+        self.assertTrue(facade.need_stopped_cluster)
+        ac(
+            config
+                .replace("port: 4433", "port: 4444")
+                .replace("timeout: 12345", "timeout: 23456")
+            ,
+            facade.config.export()
+        )
+        self.assertEqual([], reporter.report_item_list)
+
+    def test_bad_generic_options(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.update_quorum_device(
+                reporter,
+                {},
+                {
+                    "timeout": "-2",
+                    "sync_timeout": "-3",
+                    "bad_generic_option": "bad generic value",
+                    "model": "some model",
+                }
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "bad_generic_option",
+                    "option_type": "quorum device",
+                    "allowed": ["sync_timeout", "timeout"],
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "model",
+                    "option_type": "quorum device",
+                    "allowed": ["sync_timeout", "timeout"],
+                }
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "sync_timeout",
+                    "option_value": "-3",
+                    "allowed_values": "positive integer",
+                },
+                report_codes.FORCE_OPTIONS
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION_VALUE,
+                {
+                    "option_name": "timeout",
+                    "option_value": "-2",
+                    "allowed_values": "positive integer",
+                },
+                report_codes.FORCE_OPTIONS
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_bad_generic_options_cannot_force_model(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            lambda: facade.update_quorum_device(
+                reporter,
+                {},
+                {"model": "some model", },
+                force_options=True
+            ),
+            (
+                severity.ERROR,
+                report_codes.INVALID_OPTION,
+                {
+                    "option_name": "model",
+                    "option_type": "quorum device",
+                    "allowed": ["sync_timeout", "timeout"],
+                }
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(config, facade.config.export())
+
+    def test_bad_generic_options_forced(self):
+        config = self.fixture_add_device(
+            open(rc("corosync-3nodes.conf")).read()
+        )
+        reporter = MockLibraryReportProcessor()
+        facade = lib.ConfigFacade.from_string(config)
+        facade.update_quorum_device(
+            reporter,
+            {},
+            {
+                "timeout": "-2",
+                "sync_timeout": "-3",
+                "bad_generic_option": "bad generic value",
+            },
+            force_options=True
+        )
+        self.assertTrue(facade.need_stopped_cluster)
+        ac(
+            config.replace(
+                "        timeout: 12345\n        model: net",
+                """\
+        timeout: -2
+        model: net
+        bad_generic_option: bad generic value
+        sync_timeout: -3"""
+            ),
+            facade.config.export()
+        )
+        assert_report_item_list_equal(
+            reporter.report_item_list,
+            [
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION,
+                    {
+                        "option_name": "bad_generic_option",
+                        "option_type": "quorum device",
+                        "allowed": ["sync_timeout", "timeout"],
+                    },
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "sync_timeout",
+                        "option_value": "-3",
+                        "allowed_values": "positive integer",
+                    },
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.INVALID_OPTION_VALUE,
+                    {
+                        "option_name": "timeout",
+                        "option_value": "-2",
+                        "allowed_values": "positive integer",
+                    },
+                )
+            ]
+        )
+
+
+class RemoveQuorumDeviceTest(TestCase):
+    def test_empty_config(self):
+        config = ""
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            facade.remove_quorum_device,
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_NOT_DEFINED,
+                {}
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_no_device(self):
+        config = open(rc("corosync-3nodes.conf")).read()
+        facade = lib.ConfigFacade.from_string(config)
+        assert_raise_library_error(
+            facade.remove_quorum_device,
+            (
+                severity.ERROR,
+                report_codes.QDEVICE_NOT_DEFINED,
+                {}
+            )
+        )
+        self.assertFalse(facade.need_stopped_cluster)
+
+    def test_remove_all_devices(self):
+        config_no_devices = open(rc("corosync-3nodes.conf")).read()
+        config = re.sub(
+            re.compile(r"quorum {[^}]*}", re.MULTILINE | re.DOTALL),
+            """\
+quorum {
+    provider: corosync_votequorum
+
+    device {
+        option: value
+        model: net
+
+        net {
+            host: 127.0.0.1
+            port: 4433
+        }
+    }
+
+    device {
+        option: value
+    }
+}
+
+quorum {
+    device {
+        option: value
+        model: disk
+
+        net {
+            host: 127.0.0.1
+            port: 4433
+        }
+    }
+}""",
+            config_no_devices
+        )
+        facade = lib.ConfigFacade.from_string(config)
+        facade.remove_quorum_device()
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(
+            config_no_devices,
+            facade.config.export()
+        )
+
+    def test_restore_two_node(self):
+        config_no_devices = open(rc("corosync.conf")).read()
+        config = re.sub(
+            re.compile(r"quorum {[^}]*}", re.MULTILINE | re.DOTALL),
+            """\
+quorum {
+    provider: corosync_votequorum
+
+    device {
+        option: value
+        model: net
+
+        net {
+            host: 127.0.0.1
+            port: 4433
+        }
+    }
+}""",
+            config_no_devices
+        )
+        facade = lib.ConfigFacade.from_string(config)
+        facade.remove_quorum_device()
+        self.assertFalse(facade.need_stopped_cluster)
+        ac(
+            config_no_devices,
+            facade.config.export()
+        )
diff --git a/pcs/test/test_corosync_conf.py b/pcs/test/test_lib_corosync_config_parser.py
similarity index 83%
rename from pcs/test/test_corosync_conf.py
rename to pcs/test/test_lib_corosync_config_parser.py
index 541e3f0..da20889 100644
--- a/pcs/test/test_corosync_conf.py
+++ b/pcs/test/test_lib_corosync_config_parser.py
@@ -1,31 +1,48 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
-import os.path
-import sys
 import unittest
-parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0, parentdir)
 
-from pcs_test_functions import pcs, ac
-import corosync_conf
+from pcs.test.tools.misc import ac
+
+from pcs.lib.corosync import config_parser
 
 
 class SectionTest(unittest.TestCase):
 
     def test_empty_section(self):
-        section = corosync_conf.Section("mySection")
+        section = config_parser.Section("mySection")
         self.assertEqual(section.parent, None)
         self.assertEqual(section.get_root(), section)
         self.assertEqual(section.name, "mySection")
         self.assertEqual(section.get_attributes(), [])
         self.assertEqual(section.get_sections(), [])
+        self.assertTrue(section.empty)
         ac(str(section), "")
 
+    def test_is_section_empty(self):
+        section = config_parser.Section("mySection")
+        self.assertTrue(section.empty)
+
+        section = config_parser.Section("mySection")
+        section.add_attribute("name", "value")
+        self.assertFalse(section.empty)
+
+        section = config_parser.Section("mySection")
+        section.add_section(config_parser.Section("subSection"))
+        self.assertFalse(section.empty)
+
+        section = config_parser.Section("mySection")
+        section.add_attribute("name", "value")
+        section.add_section(config_parser.Section("subSection"))
+        self.assertFalse(section.empty)
+
     def test_attribute_add(self):
-        section = corosync_conf.Section("mySection")
+        section = config_parser.Section("mySection")
 
         section.add_attribute("name1", "value1")
         self.assertEqual(
@@ -55,7 +72,7 @@ class SectionTest(unittest.TestCase):
         )
 
     def test_attribute_get(self):
-        section = corosync_conf.Section("mySection")
+        section = config_parser.Section("mySection")
         section.add_attribute("name1", "value1")
         section.add_attribute("name2", "value2")
         section.add_attribute("name3", "value3")
@@ -89,7 +106,7 @@ class SectionTest(unittest.TestCase):
         )
 
     def test_attribute_set(self):
-        section = corosync_conf.Section("mySection")
+        section = config_parser.Section("mySection")
 
         section.set_attribute("name1", "value1")
         self.assertEqual(
@@ -167,7 +184,7 @@ class SectionTest(unittest.TestCase):
         )
 
     def test_attribute_change(self):
-        section = corosync_conf.Section("mySection")
+        section = config_parser.Section("mySection")
         section.add_attribute("name1", "value1")
         section.add_attribute("name2", "value2")
         section.add_attribute("name3", "value3")
@@ -187,7 +204,7 @@ class SectionTest(unittest.TestCase):
         )
 
     def test_attribute_del(self):
-        section = corosync_conf.Section("mySection")
+        section = config_parser.Section("mySection")
         section.add_attribute("name1", "value1")
         section.add_attribute("name2", "value2")
         section.add_attribute("name3", "value3")
@@ -219,7 +236,7 @@ class SectionTest(unittest.TestCase):
         )
 
     def test_attribute_del_by_name(self):
-        section = corosync_conf.Section("mySection")
+        section = config_parser.Section("mySection")
         section.add_attribute("name1", "value1")
         section.add_attribute("name2", "value2")
         section.add_attribute("name3", "value3")
@@ -286,10 +303,10 @@ class SectionTest(unittest.TestCase):
         )
 
     def test_section_add(self):
-        root = corosync_conf.Section("root")
-        child1 = corosync_conf.Section("child1")
-        child1a = corosync_conf.Section("child1a")
-        child2 = corosync_conf.Section("child2")
+        root = config_parser.Section("root")
+        child1 = config_parser.Section("child1")
+        child1a = config_parser.Section("child1a")
+        child2 = config_parser.Section("child2")
 
         root.add_section(child1)
         child1.add_section(child1a)
@@ -321,28 +338,28 @@ child2 {
 """)
 
         self.assertRaises(
-            corosync_conf.CircularParentshipException,
+            config_parser.CircularParentshipException,
             child1a.add_section, child1a
         )
         self.assertRaises(
-            corosync_conf.CircularParentshipException,
+            config_parser.CircularParentshipException,
             child1a.add_section, child2
         )
         self.assertRaises(
-            corosync_conf.CircularParentshipException,
+            config_parser.CircularParentshipException,
             child1a.add_section, root
         )
 
     def test_section_get(self):
-        root = corosync_conf.Section("")
-        child1 = corosync_conf.Section("child1")
-        child2 = corosync_conf.Section("child2")
-        childa1 = corosync_conf.Section("childA")
-        childa2 = corosync_conf.Section("childA")
-        childa3 = corosync_conf.Section("childA")
-        childa4 = corosync_conf.Section("childA")
-        childb1 = corosync_conf.Section("childB")
-        childb2 = corosync_conf.Section("childB")
+        root = config_parser.Section("")
+        child1 = config_parser.Section("child1")
+        child2 = config_parser.Section("child2")
+        childa1 = config_parser.Section("childA")
+        childa2 = config_parser.Section("childA")
+        childa3 = config_parser.Section("childA")
+        childa4 = config_parser.Section("childA")
+        childb1 = config_parser.Section("childB")
+        childb2 = config_parser.Section("childB")
         childa1.add_attribute("id", "1")
         childa2.add_attribute("id", "2")
         childa3.add_attribute("id", "3")
@@ -455,15 +472,15 @@ childA {
         )
 
     def test_section_del(self):
-        root = corosync_conf.Section("")
-        child1 = corosync_conf.Section("child1")
-        child2 = corosync_conf.Section("child2")
-        childa1 = corosync_conf.Section("childA")
-        childa2 = corosync_conf.Section("childA")
-        childa3 = corosync_conf.Section("childA")
-        childa4 = corosync_conf.Section("childA")
-        childb1 = corosync_conf.Section("childB")
-        childb2 = corosync_conf.Section("childB")
+        root = config_parser.Section("")
+        child1 = config_parser.Section("child1")
+        child2 = config_parser.Section("child2")
+        childa1 = config_parser.Section("childA")
+        childa2 = config_parser.Section("childA")
+        childa3 = config_parser.Section("childA")
+        childa4 = config_parser.Section("childA")
+        childb1 = config_parser.Section("childB")
+        childb2 = config_parser.Section("childB")
         childa1.add_attribute("id", "1")
         childa2.add_attribute("id", "2")
         childa3.add_attribute("id", "3")
@@ -588,9 +605,9 @@ child1 {
         ac(str(root), "")
 
     def test_get_root(self):
-        root = corosync_conf.Section("root")
-        child1 = corosync_conf.Section("child1")
-        child1a = corosync_conf.Section("child1a")
+        root = config_parser.Section("root")
+        child1 = config_parser.Section("child1")
+        child1a = config_parser.Section("child1a")
         root.add_section(child1)
         child1.add_section(child1a)
 
@@ -599,7 +616,7 @@ child1 {
         self.assertEqual(child1a.get_root().name, "root")
 
     def test_str(self):
-        root = corosync_conf.Section("root")
+        root = config_parser.Section("root")
         ac(str(root), "")
 
         root.add_attribute("name1", "value1")
@@ -615,7 +632,7 @@ name2: value2a
 name3: value3
 """)
 
-        child1 = corosync_conf.Section("child1")
+        child1 = config_parser.Section("child1")
         root.add_section(child1)
         ac(str(root), """\
 name1: value1
@@ -641,7 +658,7 @@ child1 {
 }
 """)
 
-        child2 = corosync_conf.Section("child2")
+        child2 = config_parser.Section("child2")
         child2.add_attribute("name2.1", "value2.1")
         root.add_section(child2)
         ac(str(root), """\
@@ -660,7 +677,7 @@ child2 {
 }
 """)
 
-        child2a = corosync_conf.Section("child2a")
+        child2a = config_parser.Section("child2a")
         child2a.add_attribute("name2.a.1", "value2.a.1")
         child2.add_section(child2a)
         ac(str(root), """\
@@ -683,10 +700,10 @@ child2 {
 }
 """)
 
-        child3 = corosync_conf.Section("child3")
+        child3 = config_parser.Section("child3")
         root.add_section(child3)
-        child3.add_section(corosync_conf.Section("child3a"))
-        child3.add_section(corosync_conf.Section("child3b"))
+        child3.add_section(config_parser.Section("child3a"))
+        child3.add_section(config_parser.Section("child3b"))
         ac(str(root), """\
 name1: value1
 name2: value2
@@ -719,7 +736,7 @@ child3 {
 class ParserTest(unittest.TestCase):
 
     def test_empty(self):
-        ac(str(corosync_conf.parse_string("")), "")
+        ac(str(config_parser.parse_string("")), "")
 
     def test_attributes(self):
         string = """\
@@ -728,7 +745,7 @@ name:value\
         parsed = """\
 name: value
 """
-        ac(str(corosync_conf.parse_string(string)), parsed)
+        ac(str(config_parser.parse_string(string)), parsed)
 
         string = """\
 name:value
@@ -738,7 +755,7 @@ name:value
 name: value
 name: value
 """
-        ac(str(corosync_conf.parse_string(string)), parsed)
+        ac(str(config_parser.parse_string(string)), parsed)
 
         string = """\
   name1:value1  
@@ -752,7 +769,7 @@ name2: value2
 name3: value3
 name4: value4
 """
-        ac(str(corosync_conf.parse_string(string)), parsed)
+        ac(str(config_parser.parse_string(string)), parsed)
 
         string = """\
 name:foo:value
@@ -760,7 +777,7 @@ name:foo:value
         parsed = """\
 name: foo:value
 """
-        root = corosync_conf.parse_string(string)
+        root = config_parser.parse_string(string)
         self.assertEqual(root.get_attributes(), [["name", "foo:value"]])
         ac(str(root), parsed)
 
@@ -770,7 +787,7 @@ name :
         parsed = """\
 name: 
 """
-        root = corosync_conf.parse_string(string)
+        root = config_parser.parse_string(string)
         self.assertEqual(root.get_attributes(), [["name", ""]])
         ac(str(root), parsed)
 
@@ -783,7 +800,7 @@ section1 {
 section1 {
 }
 """
-        ac(str(corosync_conf.parse_string(string)), parsed)
+        ac(str(config_parser.parse_string(string)), parsed)
 
         string = """\
 section1 {
@@ -802,7 +819,7 @@ section1 {
     }
 }
 """
-        ac(str(corosync_conf.parse_string(string)), parsed)
+        ac(str(config_parser.parse_string(string)), parsed)
 
         string = """\
 section1 {
@@ -835,7 +852,7 @@ section2 {
     }
 }
 """
-        ac(str(corosync_conf.parse_string(string)), parsed)
+        ac(str(config_parser.parse_string(string)), parsed)
 
         string = """\
 section1 {
@@ -848,8 +865,8 @@ section1 {
 }
 """
         self.assertRaises(
-            corosync_conf.ParseErrorException,
-            corosync_conf.parse_string, string
+            config_parser.UnexpectedClosingBraceException,
+            config_parser.parse_string, string
         )
 
         string = """\
@@ -861,24 +878,24 @@ section1 {
 }
 """
         self.assertRaises(
-            corosync_conf.ParseErrorException,
-            corosync_conf.parse_string, string
+            config_parser.MissingClosingBraceException,
+            config_parser.parse_string, string
         )
 
         string = """\
 section1 {
 """
         self.assertRaises(
-            corosync_conf.ParseErrorException,
-            corosync_conf.parse_string, string
+            config_parser.MissingClosingBraceException,
+            config_parser.parse_string, string
         )
 
         string = """\
 }
 """
         self.assertRaises(
-            corosync_conf.ParseErrorException,
-            corosync_conf.parse_string, string
+            config_parser.UnexpectedClosingBraceException,
+            config_parser.parse_string, string
         )
 
 
@@ -899,7 +916,7 @@ name2: value2#junk3
 name3: value3 #junk4
 name4 # junk5: value4
 """
-        ac(str(corosync_conf.parse_string(string)), parsed)
+        ac(str(config_parser.parse_string(string)), parsed)
 
         string= """\
 # junk1
@@ -920,15 +937,15 @@ section2 # junk2 {
 section3 {
 }
 """
-        ac(str(corosync_conf.parse_string(string)), parsed)
+        ac(str(config_parser.parse_string(string)), parsed)
 
         string = """\
 section {
 #}
 """
         self.assertRaises(
-            corosync_conf.ParseErrorException,
-            corosync_conf.parse_string, string
+            config_parser.MissingClosingBraceException,
+            config_parser.parse_string, string
         )
 
         string = """\
@@ -936,8 +953,8 @@ section {
 }
 """
         self.assertRaises(
-            corosync_conf.ParseErrorException,
-            corosync_conf.parse_string, string
+            config_parser.UnexpectedClosingBraceException,
+            config_parser.parse_string, string
         )
 
     def test_full(self):
@@ -1057,7 +1074,7 @@ logging {
 quorum {
 }
 """
-        ac(str(corosync_conf.parse_string(string)), parsed)
+        ac(str(config_parser.parse_string(string)), parsed)
 
         string = """\
 # Please read the corosync.conf.5 manual page
@@ -1175,8 +1192,4 @@ nodelist {
 quorum {
 }
 """
-        ac(str(corosync_conf.parse_string(string)), parsed)
-
-
-if __name__ == "__main__":
-    unittest.main()
+        ac(str(config_parser.parse_string(string)), parsed)
diff --git a/pcs/test/test_lib_corosync_live.py b/pcs/test/test_lib_corosync_live.py
new file mode 100644
index 0000000..4878136
--- /dev/null
+++ b/pcs/test/test_lib_corosync_live.py
@@ -0,0 +1,101 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+import os.path
+
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_mock import mock
+
+from pcs import settings
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severity
+from pcs.lib.node import NodeAddresses
+from pcs.lib.external import CommandRunner, NodeCommunicator
+
+from pcs.lib.corosync import live as lib
+
+
+class GetLocalCorosyncConfTest(TestCase):
+    def test_success(self):
+        path = rc("corosync.conf")
+        settings.corosync_conf_file = path
+        self.assertEqual(
+            lib.get_local_corosync_conf(),
+            open(path).read()
+        )
+
+    def test_error(self):
+        path = rc("corosync.conf.nonexistent")
+        settings.corosync_conf_file = path
+        assert_raise_library_error(
+            lib.get_local_corosync_conf,
+            (
+                severity.ERROR,
+                report_codes.UNABLE_TO_READ_COROSYNC_CONFIG,
+                {
+                    "path": path,
+                    "reason": "No such file or directory",
+                }
+            )
+        )
+
+
+class ReloadConfigTest(TestCase):
+    def path(self, name):
+        return os.path.join(settings.corosync_binaries, name)
+
+    def test_success(self):
+        cmd_retval = 0
+        cmd_output = "cmd output"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (cmd_output, cmd_retval)
+
+        lib.reload_config(mock_runner)
+
+        mock_runner.run.assert_called_once_with([
+            self.path("corosync-cfgtool"), "-R"
+        ])
+
+    def test_error(self):
+        cmd_retval = 1
+        cmd_output = "cmd output"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (cmd_output, cmd_retval)
+
+        assert_raise_library_error(
+            lambda: lib.reload_config(mock_runner),
+            (
+                severity.ERROR,
+                report_codes.COROSYNC_CONFIG_RELOAD_ERROR,
+                {
+                    "reason": cmd_output,
+                }
+            )
+        )
+
+        mock_runner.run.assert_called_once_with([
+            self.path("corosync-cfgtool"), "-R"
+        ])
+
+
+class SetRemoteCorosyncConfTest(TestCase):
+    def test_success(self):
+        config = "test {\nconfig: data\n}\n"
+        node = NodeAddresses("node1")
+        mock_communicator = mock.MagicMock(spec_set=NodeCommunicator)
+        mock_communicator.call_node.return_value = "dummy return"
+
+        lib.set_remote_corosync_conf(mock_communicator, node, config)
+
+        mock_communicator.call_node.assert_called_once_with(
+            node,
+            "remote/set_corosync_conf",
+            "corosync_conf=test+%7B%0Aconfig%3A+data%0A%7D%0A"
+        )
diff --git a/pcs/test/test_lib_env.py b/pcs/test/test_lib_env.py
new file mode 100644
index 0000000..fbaac09
--- /dev/null
+++ b/pcs/test/test_lib_env.py
@@ -0,0 +1,354 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+import logging
+
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_mock import mock
+
+from pcs.lib.env import LibraryEnvironment
+from pcs.common import report_codes
+from pcs.lib import reports
+from pcs.lib.corosync.config_facade import ConfigFacade as CorosyncConfigFacade
+from pcs.lib.errors import (
+    LibraryError,
+    ReportItemSeverity as severity,
+)
+
+class LibraryEnvironmentTest(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+
+    def test_logger(self):
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.assertEqual(self.mock_logger, env.logger)
+
+    def test_report_processor(self):
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.assertEqual(self.mock_reporter, env.report_processor)
+
+    def test_user_set(self):
+        user = "testuser"
+        env = LibraryEnvironment(
+            self.mock_logger,
+            self.mock_reporter,
+            user_login=user
+        )
+        self.assertEqual(user, env.user_login)
+
+    def test_user_not_set(self):
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.assertEqual(None, env.user_login)
+
+    def test_usergroups_set(self):
+        groups = ["some", "group"]
+        env = LibraryEnvironment(
+            self.mock_logger,
+            self.mock_reporter,
+            user_groups=groups
+        )
+        self.assertEqual(groups, env.user_groups)
+
+    def test_usergroups_not_set(self):
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.assertEqual([], env.user_groups)
+
+    @mock.patch("pcs.lib.env.is_cman_cluster")
+    def test_is_cman_cluster(self, mock_is_cman):
+        mock_is_cman.return_value = True
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        self.assertTrue(env.is_cman_cluster)
+        self.assertTrue(env.is_cman_cluster)
+        self.assertEqual(1, mock_is_cman.call_count)
+
+    @mock.patch("pcs.lib.env.replace_cib_configuration_xml")
+    @mock.patch("pcs.lib.env.get_cib_xml")
+    def test_cib_set(self, mock_get_cib, mock_push_cib):
+        cib_data = "test cib data"
+        new_cib_data = "new test cib data"
+        env = LibraryEnvironment(
+            self.mock_logger,
+            self.mock_reporter,
+            cib_data=cib_data
+        )
+
+        self.assertFalse(env.is_cib_live)
+
+        self.assertEqual(cib_data, env.get_cib_xml())
+        self.assertEqual(0, mock_get_cib.call_count)
+
+        env.push_cib_xml(new_cib_data)
+        self.assertEqual(0, mock_push_cib.call_count)
+
+        self.assertEqual(new_cib_data, env.get_cib_xml())
+        self.assertEqual(0, mock_get_cib.call_count)
+
+    @mock.patch("pcs.lib.env.replace_cib_configuration_xml")
+    @mock.patch("pcs.lib.env.get_cib_xml")
+    def test_cib_not_set(self, mock_get_cib, mock_push_cib):
+        cib_data = "test cib data"
+        new_cib_data = "new test cib data"
+        mock_get_cib.return_value = cib_data
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        self.assertTrue(env.is_cib_live)
+
+        self.assertEqual(cib_data, env.get_cib_xml())
+        self.assertEqual(1, mock_get_cib.call_count)
+
+        env.push_cib_xml(new_cib_data)
+        self.assertEqual(1, mock_push_cib.call_count)
+
+    @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
+    @mock.patch("pcs.lib.env.reload_corosync_config")
+    @mock.patch("pcs.lib.env.distribute_corosync_conf")
+    @mock.patch("pcs.lib.env.get_local_corosync_conf")
+    def test_corosync_conf_set(
+        self, mock_get_corosync, mock_distribute, mock_reload,
+        mock_check_offline
+    ):
+        corosync_data = "totem {\n    version: 2\n}\n"
+        new_corosync_data = "totem {\n    version: 3\n}\n"
+        env = LibraryEnvironment(
+            self.mock_logger,
+            self.mock_reporter,
+            corosync_conf_data=corosync_data
+        )
+
+        self.assertFalse(env.is_corosync_conf_live)
+
+        self.assertEqual(corosync_data, env.get_corosync_conf_data())
+        self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
+        self.assertEqual(0, mock_get_corosync.call_count)
+
+        env.push_corosync_conf(
+            CorosyncConfigFacade.from_string(new_corosync_data)
+        )
+        self.assertEqual(0, mock_distribute.call_count)
+
+        self.assertEqual(new_corosync_data, env.get_corosync_conf_data())
+        self.assertEqual(0, mock_get_corosync.call_count)
+        mock_check_offline.assert_not_called()
+        mock_reload.assert_not_called()
+
+    @mock.patch("pcs.lib.env.reload_corosync_config")
+    @mock.patch("pcs.lib.env.distribute_corosync_conf")
+    @mock.patch("pcs.lib.env.get_local_corosync_conf")
+    @mock.patch.object(
+        CorosyncConfigFacade,
+        "get_nodes",
+        lambda self: "mock node list"
+    )
+    @mock.patch.object(
+        LibraryEnvironment,
+        "node_communicator",
+        lambda self: "mock node communicator"
+    )
+    @mock.patch.object(
+        LibraryEnvironment,
+        "cmd_runner",
+        lambda self: "mock cmd runner"
+    )
+    def test_corosync_conf_not_set(
+        self, mock_get_corosync, mock_distribute, mock_reload
+    ):
+        corosync_data = open(rc("corosync.conf")).read()
+        new_corosync_data = corosync_data.replace("version: 2", "version: 3")
+        mock_get_corosync.return_value = corosync_data
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        self.assertTrue(env.is_corosync_conf_live)
+
+        self.assertEqual(corosync_data, env.get_corosync_conf_data())
+        self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
+        self.assertEqual(2, mock_get_corosync.call_count)
+
+        env.push_corosync_conf(
+            CorosyncConfigFacade.from_string(new_corosync_data)
+        )
+        mock_distribute.assert_called_once_with(
+            "mock node communicator",
+            self.mock_reporter,
+            "mock node list",
+            new_corosync_data,
+            False
+        )
+        mock_reload.assert_called_once_with("mock cmd runner")
+
+    @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
+    @mock.patch("pcs.lib.env.reload_corosync_config")
+    @mock.patch("pcs.lib.env.distribute_corosync_conf")
+    @mock.patch("pcs.lib.env.get_local_corosync_conf")
+    @mock.patch.object(
+        CorosyncConfigFacade,
+        "get_nodes",
+        lambda self: "mock node list"
+    )
+    @mock.patch.object(
+        LibraryEnvironment,
+        "node_communicator",
+        lambda self: "mock node communicator"
+    )
+    def test_corosync_conf_not_set_need_offline_success(
+        self, mock_get_corosync, mock_distribute, mock_reload,
+        mock_check_offline
+    ):
+        # Pushing a config flagged with _need_stopped_cluster on a live
+        # environment: the cluster-offline check must run and succeed, the
+        # new config must be distributed to all nodes, and the runtime
+        # corosync reload must be skipped (the cluster is stopped).
+        corosync_data = open(rc("corosync.conf")).read()
+        new_corosync_data = corosync_data.replace("version: 2", "version: 3")
+        mock_get_corosync.return_value = corosync_data
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        # No corosync_conf_data passed to the constructor, so the
+        # environment works with the live node-local config.
+        self.assertTrue(env.is_corosync_conf_live)
+
+        # Both accessors read from disk, hence two get_local_corosync_conf
+        # calls are expected.
+        self.assertEqual(corosync_data, env.get_corosync_conf_data())
+        self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
+        self.assertEqual(2, mock_get_corosync.call_count)
+
+        conf_facade = CorosyncConfigFacade.from_string(new_corosync_data)
+        conf_facade._need_stopped_cluster = True
+        env.push_corosync_conf(conf_facade)
+        mock_check_offline.assert_called_once_with(
+            "mock node communicator",
+            self.mock_reporter,
+            "mock node list",
+            False
+        )
+        mock_distribute.assert_called_once_with(
+            "mock node communicator",
+            self.mock_reporter,
+            "mock node list",
+            new_corosync_data,
+            False
+        )
+        # Cluster is required to be stopped, so no reload may happen.
+        mock_reload.assert_not_called()
+
+    @mock.patch("pcs.lib.env.check_corosync_offline_on_nodes")
+    @mock.patch("pcs.lib.env.reload_corosync_config")
+    @mock.patch("pcs.lib.env.distribute_corosync_conf")
+    @mock.patch("pcs.lib.env.get_local_corosync_conf")
+    @mock.patch.object(
+        CorosyncConfigFacade,
+        "get_nodes",
+        lambda self: "mock node list"
+    )
+    @mock.patch.object(
+        LibraryEnvironment,
+        "node_communicator",
+        lambda self: "mock node communicator"
+    )
+    def test_corosync_conf_not_set_need_offline_fail(
+        self, mock_get_corosync, mock_distribute, mock_reload,
+        mock_check_offline
+    ):
+        # Counterpart of the success test above: when the offline check
+        # reports a node still running, push_corosync_conf must raise the
+        # corresponding LibraryError and neither distribute the config nor
+        # reload corosync.
+        corosync_data = open(rc("corosync.conf")).read()
+        new_corosync_data = corosync_data.replace("version: 2", "version: 3")
+        mock_get_corosync.return_value = corosync_data
+        # Simulate the offline check failing on one node.
+        def raiser(dummy_communicator, dummy_reporter, dummy_nodes, dummy_force):
+            raise LibraryError(
+                reports.corosync_not_running_check_node_error("test node")
+            )
+        mock_check_offline.side_effect = raiser
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+
+        self.assertTrue(env.is_corosync_conf_live)
+
+        # Both accessors read from disk, hence two get_local_corosync_conf
+        # calls are expected.
+        self.assertEqual(corosync_data, env.get_corosync_conf_data())
+        self.assertEqual(corosync_data, env.get_corosync_conf().config.export())
+        self.assertEqual(2, mock_get_corosync.call_count)
+
+        conf_facade = CorosyncConfigFacade.from_string(new_corosync_data)
+        conf_facade._need_stopped_cluster = True
+        assert_raise_library_error(
+            lambda: env.push_corosync_conf(conf_facade),
+            (
+                severity.ERROR,
+                report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+                {"node": "test node"}
+            )
+        )
+        mock_check_offline.assert_called_once_with(
+            "mock node communicator",
+            self.mock_reporter,
+            "mock node list",
+            False
+        )
+        # The failed check must abort the push before any side effects.
+        mock_distribute.assert_not_called()
+        mock_reload.assert_not_called()
+
+    @mock.patch("pcs.lib.env.CommandRunner")
+    def test_cmd_runner_no_options(self, mock_runner):
+        # With no user login set, cmd_runner() builds the CommandRunner
+        # with an empty environment-variable dict.
+        expected_runner = mock.MagicMock()
+        mock_runner.return_value = expected_runner
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        runner = env.cmd_runner()
+        self.assertEqual(expected_runner, runner)
+        mock_runner.assert_called_once_with(
+            self.mock_logger,
+            self.mock_reporter,
+            {}
+        )
+
+    @mock.patch("pcs.lib.env.CommandRunner")
+    def test_cmd_runner_all_options(self, mock_runner):
+        # A user_login passed to the environment must be forwarded to the
+        # CommandRunner as the CIB_user environment variable.
+        expected_runner = mock.MagicMock()
+        mock_runner.return_value = expected_runner
+        user = "testuser"
+        env = LibraryEnvironment(
+            self.mock_logger,
+            self.mock_reporter,
+            user_login=user
+        )
+        runner = env.cmd_runner()
+        self.assertEqual(expected_runner, runner)
+        mock_runner.assert_called_once_with(
+            self.mock_logger,
+            self.mock_reporter,
+            {"CIB_user": user}
+        )
+
+    @mock.patch("pcs.lib.env.NodeCommunicator")
+    def test_node_communicator_no_options(self, mock_comm):
+        # Defaults: empty auth-token dict, no user, no groups.
+        expected_comm = mock.MagicMock()
+        mock_comm.return_value = expected_comm
+        env = LibraryEnvironment(self.mock_logger, self.mock_reporter)
+        comm = env.node_communicator()
+        self.assertEqual(expected_comm, comm)
+        mock_comm.assert_called_once_with(
+            self.mock_logger,
+            self.mock_reporter,
+            {},
+            None,
+            []
+        )
+
+    @mock.patch("pcs.lib.env.NodeCommunicator")
+    def test_node_communicator_all_options(self, mock_comm):
+        # user_login, user_groups and the tokens produced by
+        # auth_tokens_getter must all be forwarded to the NodeCommunicator
+        # constructor.
+        expected_comm = mock.MagicMock()
+        mock_comm.return_value = expected_comm
+        user = "testuser"
+        groups = ["some", "group"]
+        tokens = {"node": "token"}
+        env = LibraryEnvironment(
+            self.mock_logger,
+            self.mock_reporter,
+            user_login=user,
+            user_groups=groups,
+            auth_tokens_getter=lambda:tokens
+        )
+        comm = env.node_communicator()
+        self.assertEqual(expected_comm, comm)
+        mock_comm.assert_called_once_with(
+            self.mock_logger,
+            self.mock_reporter,
+            tokens,
+            user,
+            groups
+        )
diff --git a/pcs/test/test_lib_external.py b/pcs/test/test_lib_external.py
new file mode 100644
index 0000000..0e5f8a5
--- /dev/null
+++ b/pcs/test/test_lib_external.py
@@ -0,0 +1,860 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+import os.path
+import logging
+try:
+    # python2
+    from urllib2 import (
+        HTTPError as urllib_HTTPError,
+        URLError as urllib_URLError
+    )
+except ImportError:
+    # python3
+    from urllib.error import (
+        HTTPError as urllib_HTTPError,
+        URLError as urllib_URLError
+    )
+
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_report_item_equal,
+    assert_report_item_list_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_mock import mock
+
+from pcs import settings
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severity
+
+import pcs.lib.external as lib
+
+
+ at mock.patch("subprocess.Popen", autospec=True)
+class CommandRunnerTest(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+
+    def assert_popen_called_with(self, mock_popen, args, kwargs):
+        self.assertEqual(mock_popen.call_count, 1)
+        real_args, real_kwargs = mock_popen.call_args
+        filtered_kwargs = dict([
+            (name, value) for name, value in real_kwargs.items()
+            if name in kwargs
+        ])
+        self.assertEqual(real_args, (args,))
+        self.assertEqual(filtered_kwargs, kwargs)
+
+    def test_basic(self, mock_popen):
+        expected_output = "expected output"
+        expected_retval = 123
+        command = ["a_command"]
+        command_str = "a_command"
+        mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
+        mock_process.communicate.return_value = (expected_output, "dummy")
+        mock_process.returncode = expected_retval
+        mock_popen.return_value = mock_process
+
+        runner = lib.CommandRunner(self.mock_logger, self.mock_reporter)
+        real_output, real_retval = runner.run(command)
+
+        self.assertEqual(real_output, expected_output)
+        self.assertEqual(real_retval, expected_retval)
+        mock_process.communicate.assert_called_once_with(None)
+        self.assert_popen_called_with(
+            mock_popen,
+            command,
+            {"env": {}, "stdin": None,}
+        )
+        logger_calls = [
+            mock.call("Running: {0}".format(command_str)),
+            mock.call("""\
+Finished running: {0}
+Return value: {1}
+--Debug Output Start--
+{2}
+--Debug Output End--""".format(command_str, expected_retval, expected_output))
+        ]
+        self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
+        self.mock_logger.debug.assert_has_calls(logger_calls)
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.DEBUG,
+                    report_codes.RUN_EXTERNAL_PROCESS_STARTED,
+                    {
+                        "command": command_str,
+                        "stdin": None,
+                    }
+                ),
+                (
+                    severity.DEBUG,
+                    report_codes.RUN_EXTERNAL_PROCESS_FINISHED,
+                    {
+                        "command": command_str,
+                        "return_value": expected_retval,
+                        "stdout": expected_output,
+                    }
+                )
+            ]
+        )
+
+    def test_env(self, mock_popen):
+        expected_output = "expected output"
+        expected_retval = 123
+        command = ["a_command"]
+        command_str = "a_command"
+        mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
+        mock_process.communicate.return_value = (expected_output, "dummy")
+        mock_process.returncode = expected_retval
+        mock_popen.return_value = mock_process
+
+        runner = lib.CommandRunner(
+            self.mock_logger,
+            self.mock_reporter,
+            {"a": "a", "b": "b"}
+        )
+        real_output, real_retval = runner.run(
+            command,
+            env_extend={"b": "B", "c": "C"}
+        )
+
+        self.assertEqual(real_output, expected_output)
+        self.assertEqual(real_retval, expected_retval)
+        mock_process.communicate.assert_called_once_with(None)
+        self.assert_popen_called_with(
+            mock_popen,
+            command,
+            {"env": {"a": "a", "b": "b", "c": "C"}, "stdin": None,}
+        )
+        logger_calls = [
+            mock.call("Running: {0}".format(command_str)),
+            mock.call("""\
+Finished running: {0}
+Return value: {1}
+--Debug Output Start--
+{2}
+--Debug Output End--""".format(command_str, expected_retval, expected_output))
+        ]
+        self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
+        self.mock_logger.debug.assert_has_calls(logger_calls)
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.DEBUG,
+                    report_codes.RUN_EXTERNAL_PROCESS_STARTED,
+                    {
+                        "command": command_str,
+                        "stdin": None,
+                    }
+                ),
+                (
+                    severity.DEBUG,
+                    report_codes.RUN_EXTERNAL_PROCESS_FINISHED,
+                    {
+                        "command": command_str,
+                        "return_value": expected_retval,
+                        "stdout": expected_output,
+                    }
+                )
+            ]
+        )
+
+    def test_stdin(self, mock_popen):
+        expected_output = "expected output"
+        expected_retval = 123
+        command = ["a_command"]
+        command_str = "a_command"
+        stdin = "stdin string"
+        mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
+        mock_process.communicate.return_value = (expected_output, "dummy")
+        mock_process.returncode = expected_retval
+        mock_popen.return_value = mock_process
+
+        runner = lib.CommandRunner(self.mock_logger, self.mock_reporter)
+        real_output, real_retval = runner.run(command, stdin_string=stdin)
+
+        self.assertEqual(real_output, expected_output)
+        self.assertEqual(real_retval, expected_retval)
+        mock_process.communicate.assert_called_once_with(stdin)
+        self.assert_popen_called_with(
+            mock_popen,
+            command,
+            {"env": {}, "stdin": -1}
+        )
+        logger_calls = [
+            mock.call("""\
+Running: {0}
+--Debug Input Start--
+{1}
+--Debug Input End--""".format(command_str, stdin)),
+            mock.call("""\
+Finished running: {0}
+Return value: {1}
+--Debug Output Start--
+{2}
+--Debug Output End--""".format(command_str, expected_retval, expected_output))
+        ]
+        self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
+        self.mock_logger.debug.assert_has_calls(logger_calls)
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.DEBUG,
+                    report_codes.RUN_EXTERNAL_PROCESS_STARTED,
+                    {
+                        "command": command_str,
+                        "stdin": stdin,
+                    }
+                ),
+                (
+                    severity.DEBUG,
+                    report_codes.RUN_EXTERNAL_PROCESS_FINISHED,
+                    {
+                        "command": command_str,
+                        "return_value": expected_retval,
+                        "stdout": expected_output,
+                    }
+                )
+            ]
+        )
+
+    def test_popen_error(self, mock_popen):
+        expected_error = "expected error"
+        command = ["a_command"]
+        command_str = "a_command"
+        mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
+        exception = OSError()
+        exception.strerror = expected_error
+        mock_popen.side_effect = exception
+
+        runner = lib.CommandRunner(self.mock_logger, self.mock_reporter)
+        assert_raise_library_error(
+            lambda: runner.run(command),
+            (
+                severity.ERROR,
+                report_codes.RUN_EXTERNAL_PROCESS_ERROR,
+                {
+                    "command": command_str,
+                    "reason": expected_error,
+                }
+            )
+        )
+
+        mock_process.communicate.assert_not_called()
+        self.assert_popen_called_with(
+            mock_popen,
+            command,
+            {"env": {}, "stdin": None,}
+        )
+        logger_calls = [
+            mock.call("Running: {0}".format(command_str)),
+        ]
+        self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
+        self.mock_logger.debug.assert_has_calls(logger_calls)
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.DEBUG,
+                    report_codes.RUN_EXTERNAL_PROCESS_STARTED,
+                    {
+                        "command": command_str,
+                        "stdin": None,
+                    }
+                )
+            ]
+        )
+
+    def test_communicate_error(self, mock_popen):
+        expected_error = "expected error"
+        command = ["a_command"]
+        command_str = "a_command"
+        mock_process = mock.MagicMock(spec_set=["communicate", "returncode"])
+        exception = OSError()
+        exception.strerror = expected_error
+        mock_process.communicate.side_effect = exception
+        mock_popen.return_value = mock_process
+
+        runner = lib.CommandRunner(self.mock_logger, self.mock_reporter)
+        assert_raise_library_error(
+            lambda: runner.run(command),
+            (
+                severity.ERROR,
+                report_codes.RUN_EXTERNAL_PROCESS_ERROR,
+                {
+                    "command": command_str,
+                    "reason": expected_error,
+                }
+            )
+        )
+
+        mock_process.communicate.assert_called_once_with(None)
+        self.assert_popen_called_with(
+            mock_popen,
+            command,
+            {"env": {}, "stdin": None,}
+        )
+        logger_calls = [
+            mock.call("Running: {0}".format(command_str)),
+        ]
+        self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
+        self.mock_logger.debug.assert_has_calls(logger_calls)
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.DEBUG,
+                    report_codes.RUN_EXTERNAL_PROCESS_STARTED,
+                    {
+                        "command": command_str,
+                        "stdin": None,
+                    }
+                )
+            ]
+        )
+
+
+ at mock.patch(
+    "pcs.lib.external.NodeCommunicator._NodeCommunicator__get_opener",
+    autospec=True
+)
+class NodeCommunicatorTest(TestCase):
+    def setUp(self):
+        self.mock_logger = mock.MagicMock(logging.Logger)
+        self.mock_reporter = MockLibraryReportProcessor()
+
+    def fixture_response(self, response_code, response_data):
+        response = mock.MagicMock(["getcode", "read"])
+        response.getcode.return_value = response_code
+        response.read.return_value = response_data.encode("utf-8")
+        return response
+
+    def fixture_http_exception(self, response_code, response_data):
+        response = urllib_HTTPError("url", response_code, "msg", [], None)
+        response.read = mock.MagicMock(
+            return_value=response_data.encode("utf-8")
+        )
+        return response
+
+    def fixture_logger_call_send(self, url, data):
+        send_msg = "Sending HTTP Request to: {url}"
+        if data:
+            send_msg += "\n--Debug Input Start--\n{data}\n--Debug Input End--"
+        return mock.call(send_msg.format(url=url, data=data))
+
+    def fixture_logger_calls(self, url, data, response_code, response_data):
+        result_msg = (
+            "Finished calling: {url}\nResponse Code: {code}"
+            + "\n--Debug Response Start--\n{response}\n--Debug Response End--"
+        )
+        return [
+            self.fixture_logger_call_send(url, data),
+            mock.call(result_msg.format(
+                url=url, code=response_code, response=response_data
+            ))
+        ]
+
+    def fixture_report_item_list_send(self, url, data):
+        return [
+            (
+                severity.DEBUG,
+                report_codes.NODE_COMMUNICATION_STARTED,
+                {
+                    "target": url,
+                    "data": data,
+                }
+            )
+        ]
+
+    def fixture_report_item_list(self, url, data, response_code, response_data):
+        return (
+            self.fixture_report_item_list_send(url, data)
+            +
+            [
+                (
+                    severity.DEBUG,
+                    report_codes.NODE_COMMUNICATION_FINISHED,
+                    {
+                        "target": url,
+                        "response_code": response_code,
+                        "response_data": response_data,
+                    }
+                )
+            ]
+        )
+
+    def fixture_url(self, host, request):
+        return "https://{host}:2224/{request}".format(
+            host=host, request=request
+        )
+
+    def test_success(self, mock_get_opener):
+        host = "test_host"
+        request = "test_request"
+        data = '{"key1": "value1", "key2": ["value2a", "value2b"]}'
+        expected_response_code = 200
+        expected_response_data = "expected response data"
+        mock_opener = mock.MagicMock()
+        mock_get_opener.return_value = mock_opener
+        mock_opener.open.return_value = self.fixture_response(
+            expected_response_code, expected_response_data
+        )
+
+        comm = lib.NodeCommunicator(self.mock_logger, self.mock_reporter, {})
+        real_response = comm.call_host(host, request, data)
+        self.assertEqual(expected_response_data, real_response)
+
+        mock_opener.addheaders.append.assert_not_called()
+        mock_opener.open.assert_called_once_with(
+            self.fixture_url(host, request),
+            data.encode("utf-8")
+        )
+        logger_calls = self.fixture_logger_calls(
+            self.fixture_url(host, request),
+            data,
+            expected_response_code,
+            expected_response_data
+        )
+        self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
+        self.mock_logger.debug.assert_has_calls(logger_calls)
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            self.fixture_report_item_list(
+                self.fixture_url(host, request),
+                data,
+                expected_response_code,
+                expected_response_data
+            )
+        )
+
+    def test_ipv6(self, mock_get_opener):
+        host = "cafe::1"
+        request = "test_request"
+        data = None
+        token = "test_token"
+        expected_response_code = 200
+        expected_response_data = "expected response data"
+        mock_opener = mock.MagicMock()
+        mock_get_opener.return_value = mock_opener
+        mock_opener.open.return_value = self.fixture_response(
+            expected_response_code, expected_response_data
+        )
+
+        comm = lib.NodeCommunicator(
+            self.mock_logger,
+            self.mock_reporter,
+            {host: token,}
+        )
+        real_response = comm.call_host(host, request, data)
+        self.assertEqual(expected_response_data, real_response)
+
+        mock_opener.addheaders.append.assert_called_once_with(
+            ("Cookie", "token={0}".format(token))
+        )
+        mock_opener.open.assert_called_once_with(
+            self.fixture_url("[{0}]".format(host), request),
+            data
+        )
+        logger_calls = self.fixture_logger_calls(
+            self.fixture_url("[{0}]".format(host), request),
+            data,
+            expected_response_code,
+            expected_response_data
+        )
+        self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
+        self.mock_logger.debug.assert_has_calls(logger_calls)
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            self.fixture_report_item_list(
+                self.fixture_url("[{0}]".format(host), request),
+                data,
+                expected_response_code,
+                expected_response_data
+            )
+        )
+
+    def test_auth_token(self, mock_get_opener):
+        host = "test_host"
+        token = "test_token"
+        mock_opener = mock.MagicMock()
+        mock_get_opener.return_value = mock_opener
+
+        comm = lib.NodeCommunicator(
+            self.mock_logger,
+            self.mock_reporter,
+            {
+                "some_host": "some_token",
+                host: token,
+                "other_host": "other_token"
+            }
+        )
+        dummy_response = comm.call_host(host, "test_request", None)
+
+        mock_opener.addheaders.append.assert_called_once_with(
+            ("Cookie", "token={0}".format(token))
+        )
+
+    def test_user(self, mock_get_opener):
+        host = "test_host"
+        user = "test_user"
+        mock_opener = mock.MagicMock()
+        mock_get_opener.return_value = mock_opener
+
+        comm = lib.NodeCommunicator(
+            self.mock_logger,
+            self.mock_reporter,
+            {},
+            user=user
+        )
+        dummy_response = comm.call_host(host, "test_request", None)
+
+        mock_opener.addheaders.append.assert_called_once_with(
+            ("Cookie", "CIB_user={0}".format(user))
+        )
+
+    def test_one_group(self, mock_get_opener):
+        host = "test_host"
+        groups = ["group1"]
+        mock_opener = mock.MagicMock()
+        mock_get_opener.return_value = mock_opener
+
+        comm = lib.NodeCommunicator(
+            self.mock_logger,
+            self.mock_reporter,
+            {},
+            groups=groups
+        )
+        dummy_response = comm.call_host(host, "test_request", None)
+
+        mock_opener.addheaders.append.assert_called_once_with(
+            (
+                "Cookie",
+                "CIB_user_groups={0}".format("Z3JvdXAx".encode("utf8"))
+            )
+        )
+
+    def test_all_options(self, mock_get_opener):
+        host = "test_host"
+        token = "test_token"
+        user = "test_user"
+        groups = ["group1", "group2"]
+        mock_opener = mock.MagicMock()
+        mock_get_opener.return_value = mock_opener
+
+        comm = lib.NodeCommunicator(
+            self.mock_logger,
+            self.mock_reporter,
+            {host: token},
+            user, groups
+        )
+        dummy_response = comm.call_host(host, "test_request", None)
+
+        mock_opener.addheaders.append.assert_called_once_with(
+            (
+                "Cookie",
+                "token={token};CIB_user={user};CIB_user_groups={groups}".format(
+                    token=token,
+                    user=user,
+                    groups="Z3JvdXAxIGdyb3VwMg==".encode("utf-8")
+                )
+            )
+        )
+        mock_opener = mock.MagicMock()
+        mock_get_opener.return_value = mock_opener
+
+    def base_test_http_error(self, mock_get_opener, code, exception):
+        host = "test_host"
+        request = "test_request"
+        data = None
+        expected_response_code = code
+        expected_response_data = "expected response data"
+        mock_opener = mock.MagicMock()
+        mock_get_opener.return_value = mock_opener
+        mock_opener.open.side_effect = self.fixture_http_exception(
+            expected_response_code, expected_response_data
+        )
+
+        comm = lib.NodeCommunicator(self.mock_logger, self.mock_reporter, {})
+        self.assertRaises(
+            exception,
+            lambda: comm.call_host(host, request, data)
+        )
+
+        mock_opener.addheaders.append.assert_not_called()
+        mock_opener.open.assert_called_once_with(
+            self.fixture_url(host, request),
+            data
+        )
+        logger_calls = self.fixture_logger_calls(
+            self.fixture_url(host, request),
+            data,
+            expected_response_code,
+            expected_response_data
+        )
+        self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
+        self.mock_logger.debug.assert_has_calls(logger_calls)
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            self.fixture_report_item_list(
+                self.fixture_url(host, request),
+                data,
+                expected_response_code,
+                expected_response_data
+            )
+        )
+
+    def test_no_authenticated(self, mock_get_opener):
+        self.base_test_http_error(
+            mock_get_opener,
+            401,
+            lib.NodeAuthenticationException
+        )
+
+    def test_permission_denied(self, mock_get_opener):
+        self.base_test_http_error(
+            mock_get_opener,
+            403,
+            lib.NodePermissionDeniedException
+        )
+
+    def test_unsupported_command(self, mock_get_opener):
+        self.base_test_http_error(
+            mock_get_opener,
+            404,
+            lib.NodeUnsupportedCommandException
+        )
+
+    def test_other_error(self, mock_get_opener):
+        self.base_test_http_error(
+            mock_get_opener,
+            500,
+            lib.NodeCommunicationException
+        )
+
+    def test_connection_error(self, mock_get_opener):
+        host = "test_host"
+        request = "test_request"
+        data = None
+        expected_reason = "expected reason"
+        mock_opener = mock.MagicMock()
+        mock_get_opener.return_value = mock_opener
+        mock_opener.open.side_effect = urllib_URLError(expected_reason)
+
+        comm = lib.NodeCommunicator(self.mock_logger, self.mock_reporter, {})
+        self.assertRaises(
+            lib.NodeConnectionException,
+            lambda: comm.call_host(host, request, data)
+        )
+
+        mock_opener.addheaders.append.assert_not_called()
+        mock_opener.open.assert_called_once_with(
+            self.fixture_url(host, request),
+            data
+        )
+        logger_calls = [
+            self.fixture_logger_call_send(
+                self.fixture_url(host, request),
+                data
+            ),
+            mock.call(
+                "Unable to connect to {0} ({1})".format(host, expected_reason)
+            )
+        ]
+        self.assertEqual(self.mock_logger.debug.call_count, len(logger_calls))
+        self.mock_logger.debug.assert_has_calls(logger_calls)
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            self.fixture_report_item_list_send(
+                self.fixture_url(host, request),
+                data
+            )
+            +
+            [(
+                severity.DEBUG,
+                report_codes.NODE_COMMUNICATION_NOT_CONNECTED,
+                {
+                    "node": host,
+                    "reason": expected_reason,
+                }
+            )]
+        )
+
+
+class NodeCommunicatorExceptionTransformTest(TestCase):
+    # Tests for node_communicator_exception_to_report_item: each known
+    # NodeCommunicator exception maps to its ERROR report code; unknown
+    # exceptions are re-raised unchanged.
+    def test_transform_error_401(self):
+        node = "test_node"
+        command = "test_command"
+        reason = "test_reason"
+
+        assert_report_item_equal(
+            lib.node_communicator_exception_to_report_item(
+                lib.NodeAuthenticationException(node, command, reason)
+            ),
+            (
+                severity.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+                {
+                    "node": node,
+                    "command": command,
+                    "reason": reason,
+                }
+            )
+        )
+
+    def test_transform_error_403(self):
+        node = "test_node"
+        command = "test_command"
+        reason = "test_reason"
+
+        assert_report_item_equal(
+            lib.node_communicator_exception_to_report_item(
+                lib.NodePermissionDeniedException(node, command, reason)
+            ),
+            (
+                severity.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR_PERMISSION_DENIED,
+                {
+                    "node": node,
+                    "command": command,
+                    "reason": reason,
+                }
+            )
+        )
+
+    def test_transform_error_404(self):
+        node = "test_node"
+        command = "test_command"
+        reason = "test_reason"
+
+        assert_report_item_equal(
+            lib.node_communicator_exception_to_report_item(
+                lib.NodeUnsupportedCommandException(node, command, reason)
+            ),
+            (
+                severity.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR_UNSUPPORTED_COMMAND,
+                {
+                    "node": node,
+                    "command": command,
+                    "reason": reason,
+                }
+            )
+        )
+
+    def test_transform_error_connecting(self):
+        node = "test_node"
+        command = "test_command"
+        reason = "test_reason"
+
+        assert_report_item_equal(
+            lib.node_communicator_exception_to_report_item(
+                lib.NodeConnectionException(node, command, reason)
+            ),
+            (
+                severity.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR_UNABLE_TO_CONNECT,
+                {
+                    "node": node,
+                    "command": command,
+                    "reason": reason,
+                }
+            )
+        )
+
+    def test_transform_error_other(self):
+        # Any other NodeCommunicationException falls back to the generic
+        # NODE_COMMUNICATION_ERROR code.
+        node = "test_node"
+        command = "test_command"
+        reason = "test_reason"
+
+        assert_report_item_equal(
+            lib.node_communicator_exception_to_report_item(
+                lib.NodeCommunicationException(node, command, reason)
+            ),
+            (
+                severity.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR,
+                {
+                    "node": node,
+                    "command": command,
+                    "reason": reason,
+                }
+            )
+        )
+
+    def test_unsupported_exception(self):
+        # A non-NodeCommunicator exception must be re-raised as the very
+        # same object, not wrapped or converted.
+        exc = Exception("test")
+        raised = False
+        try:
+            lib.node_communicator_exception_to_report_item(exc)
+        except Exception as e:
+            raised = True
+            self.assertEqual(e, exc)
+        self.assertTrue(raised)
+
+
+class IsCmanClusterTest(TestCase):
+    # Tests for is_cman_cluster: a CMAN cluster is detected by
+    # "corosync -v" reporting a 1.x engine version; anything else
+    # (2.x, unparsable output, or a nonzero exit) means "not CMAN".
+    def template_test(self, is_cman, corosync_output, corosync_retval=0):
+        # Run is_cman_cluster against canned "corosync -v" output and
+        # verify both the verdict and the exact command invoked.
+        mock_runner = mock.MagicMock(spec_set=lib.CommandRunner)
+        mock_runner.run.return_value = (corosync_output, corosync_retval)
+        self.assertEqual(is_cman, lib.is_cman_cluster(mock_runner))
+        mock_runner.run.assert_called_once_with([
+            os.path.join(settings.corosync_binaries, "corosync"),
+            "-v"
+        ])
+
+    def test_is_not_cman(self):
+        # corosync 2.x => not a CMAN cluster.
+        self.template_test(
+            False,
+            """\
+Corosync Cluster Engine, version '2.3.4'
+Copyright (c) 2006-2009 Red Hat, Inc.
+"""
+        )
+
+    def test_is_cman(self):
+        # corosync 1.x => CMAN cluster.
+        self.template_test(
+            True,
+            """\
+Corosync Cluster Engine, version '1.4.7'
+Copyright (c) 2006-2009 Red Hat, Inc.
+"""
+        )
+
+    def test_bad_version_format(self):
+        # Output without the "version" keyword is treated as not CMAN.
+        self.template_test(
+            False,
+            """\
+Corosync Cluster Engine, nonsense '2.3.4'
+Copyright (c) 2006-2009 Red Hat, Inc.
+"""
+        )
+
+    def test_no_version(self):
+        # Output without any version string is treated as not CMAN.
+        self.template_test(
+            False,
+            """\
+Corosync Cluster Engine
+Copyright (c) 2006-2009 Red Hat, Inc.
+"""
+        )
+
+    def test_corosync_error(self):
+        # A nonzero exit code overrides even CMAN-looking output.
+        self.template_test(
+            False,
+            """\
+Corosync Cluster Engine, version '1.4.7'
+Copyright (c) 2006-2009 Red Hat, Inc.
+""",
+            1
+        )
diff --git a/pcs/test/test_lib_node.py b/pcs/test/test_lib_node.py
new file mode 100644
index 0000000..19e5a3a
--- /dev/null
+++ b/pcs/test/test_lib_node.py
@@ -0,0 +1,82 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+import pcs.lib.node as lib
+
+class NodeAddressesTest(TestCase):
+    def test_properties_all(self):
+        ring0 = "test_ring0"
+        ring1 = "test_ring1"
+        name = "test_name"
+        id = "test_id"
+        node = lib.NodeAddresses(ring0, ring1, name, id)
+        self.assertEqual(ring0, node.ring0)
+        self.assertEqual(ring1, node.ring1)
+        self.assertEqual(name, node.label)
+        self.assertEqual(id, node.id)
+
+    def test_properties_required(self):
+        ring0 = "test_ring0"
+        node = lib.NodeAddresses(ring0)
+        self.assertEqual(ring0, node.ring0)
+        self.assertEqual(None, node.ring1)
+        self.assertEqual(ring0, node.label)
+        self.assertEqual(None, node.id)
+
+
+class NodeAddressesListTest(TestCase):
+    def test_empty(self):
+        nodes = lib.NodeAddressesList()
+        self.assertEqual(0, len(nodes))
+        self.assertEqual([], list(nodes))
+        self.assertRaises(IndexError, lambda: nodes[0])
+
+    def test_append(self):
+        node1 = lib.NodeAddresses("node1")
+        node2 = lib.NodeAddresses("node2")
+        nodes = lib.NodeAddressesList()
+
+        nodes.append(node1)
+        self.assertEqual(1, len(nodes))
+        self.assertEqual([node1], list(nodes))
+        self.assertEqual(node1, nodes[0])
+
+        nodes.append(node2)
+        self.assertEqual(2, len(nodes))
+        self.assertEqual([node1, node2], list(nodes))
+        self.assertEqual(node1, nodes[0])
+        self.assertEqual(node2, nodes[1])
+
+    def test_create_from_empty_list(self):
+        nodes = lib.NodeAddressesList([])
+        self.assertEqual(0, len(nodes))
+        self.assertEqual([], list(nodes))
+        self.assertRaises(IndexError, lambda: nodes[0])
+
+    def test_create_from_list(self):
+        node1 = lib.NodeAddresses("node1")
+        node2 = lib.NodeAddresses("node2")
+        nodes = lib.NodeAddressesList([node1, node2])
+        self.assertEqual(2, len(nodes))
+        self.assertEqual([node1, node2], list(nodes))
+        self.assertEqual(node1, nodes[0])
+        self.assertEqual(node2, nodes[1])
+
+    def test_create_from_node_list(self):
+        node1 = lib.NodeAddresses("node1")
+        node2 = lib.NodeAddresses("node2")
+        node3 = lib.NodeAddresses("node3")
+        nodes_source = lib.NodeAddressesList([node1, node2])
+        nodes = lib.NodeAddressesList(nodes_source)
+        nodes_source.append(node3)
+
+        self.assertEqual(2, len(nodes))
+        self.assertEqual([node1, node2], list(nodes))
+        self.assertEqual(node1, nodes[0])
+        self.assertEqual(node2, nodes[1])
diff --git a/pcs/test/test_lib_nodes_task.py b/pcs/test/test_lib_nodes_task.py
new file mode 100644
index 0000000..cf42069
--- /dev/null
+++ b/pcs/test/test_lib_nodes_task.py
@@ -0,0 +1,452 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_report_item_list_equal,
+)
+from pcs.test.tools.custom_mock import MockLibraryReportProcessor
+from pcs.test.tools.pcs_mock import mock
+
+from pcs.common import report_codes
+from pcs.lib.external import NodeCommunicator, NodeAuthenticationException
+from pcs.lib.node import NodeAddresses, NodeAddressesList
+from pcs.lib.errors import ReportItemSeverity as severity
+
+import pcs.lib.nodes_task as lib
+
+
+class DistributeCorosyncConfTest(TestCase):
+    def setUp(self):
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.mock_communicator = "mock node communicator"
+
+    def assert_set_remote_corosync_conf_call(self, a_call, node_ring0, config):
+        self.assertEqual("set_remote_corosync_conf", a_call[0])
+        self.assertEqual(3, len(a_call[1]))
+        self.assertEqual(self.mock_communicator, a_call[1][0])
+        self.assertEqual(node_ring0, a_call[1][1].ring0)
+        self.assertEqual(config, a_call[1][2])
+        self.assertEqual(0, len(a_call[2]))
+
+    @mock.patch("pcs.lib.nodes_task.corosync_live")
+    def test_success(self, mock_corosync_live):
+        conf_text = "test conf text"
+        nodes = ["node1", "node2"]
+        node_addrs_list = NodeAddressesList(
+            [NodeAddresses(addr) for addr in nodes]
+        )
+        mock_corosync_live.set_remote_corosync_conf = mock.MagicMock()
+
+        lib.distribute_corosync_conf(
+            self.mock_communicator,
+            self.mock_reporter,
+            node_addrs_list,
+            conf_text
+        )
+
+        corosync_live_calls = [
+            mock.call.set_remote_corosync_conf(
+                "mock node communicator", nodes[0], conf_text
+            ),
+            mock.call.set_remote_corosync_conf(
+                "mock node communicator", nodes[1], conf_text
+            ),
+        ]
+        self.assertEqual(
+            len(corosync_live_calls),
+            len(mock_corosync_live.mock_calls)
+        )
+        self.assert_set_remote_corosync_conf_call(
+            mock_corosync_live.mock_calls[0], nodes[0], conf_text
+        )
+        self.assert_set_remote_corosync_conf_call(
+            mock_corosync_live.mock_calls[1], nodes[1], conf_text
+        )
+
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED,
+                    {}
+                ),
+                (
+                    severity.INFO,
+                    report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+                    {"node": nodes[0]}
+                ),
+                (
+                    severity.INFO,
+                    report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+                    {"node": nodes[1]}
+                ),
+            ]
+        )
+
+    @mock.patch("pcs.lib.nodes_task.corosync_live")
+    def test_one_node_down(self, mock_corosync_live):
+        conf_text = "test conf text"
+        nodes = ["node1", "node2"]
+        node_addrs_list = NodeAddressesList(
+            [NodeAddresses(addr) for addr in nodes]
+        )
+        mock_corosync_live.set_remote_corosync_conf = mock.MagicMock()
+        def raiser(comm, node, conf):
+            if node.ring0 == nodes[1]:
+                raise NodeAuthenticationException(
+                    nodes[1], "command", "HTTP error: 401"
+                )
+        mock_corosync_live.set_remote_corosync_conf.side_effect = raiser
+
+        assert_raise_library_error(
+            lambda: lib.distribute_corosync_conf(
+                self.mock_communicator,
+                self.mock_reporter,
+                node_addrs_list,
+                conf_text
+            ),
+            (
+                severity.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+                {
+                    "node": nodes[1],
+                    "command": "command",
+                    "reason" : "HTTP error: 401",
+                },
+                report_codes.SKIP_OFFLINE_NODES
+            ),
+            (
+                severity.ERROR,
+                report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+                {
+                    "node": nodes[1],
+                },
+                report_codes.SKIP_OFFLINE_NODES
+            )
+        )
+
+        corosync_live_calls = [
+            mock.call.set_remote_corosync_conf(
+                "mock node communicator", nodes[0], conf_text
+            ),
+            mock.call.set_remote_corosync_conf(
+                "mock node communicator", nodes[1], conf_text
+            ),
+        ]
+        self.assertEqual(
+            len(corosync_live_calls),
+            len(mock_corosync_live.mock_calls)
+        )
+        self.assert_set_remote_corosync_conf_call(
+            mock_corosync_live.mock_calls[0], nodes[0], conf_text
+        )
+        self.assert_set_remote_corosync_conf_call(
+            mock_corosync_live.mock_calls[1], nodes[1], conf_text
+        )
+
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED,
+                    {}
+                ),
+                (
+                    severity.INFO,
+                    report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+                    {"node": nodes[0]}
+                ),
+                (
+                    severity.ERROR,
+                    report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+                    {
+                        "node": nodes[1],
+                        "command": "command",
+                        "reason" : "HTTP error: 401",
+                    },
+                    report_codes.SKIP_OFFLINE_NODES
+                ),
+                (
+                    severity.ERROR,
+                    report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+                    {
+                        "node": nodes[1],
+                    },
+                    report_codes.SKIP_OFFLINE_NODES
+                )
+            ]
+        )
+
+    @mock.patch("pcs.lib.nodes_task.corosync_live")
+    def test_one_node_down_forced(self, mock_corosync_live):
+        conf_text = "test conf text"
+        nodes = ["node1", "node2"]
+        node_addrs_list = NodeAddressesList(
+            [NodeAddresses(addr) for addr in nodes]
+        )
+        mock_corosync_live.set_remote_corosync_conf = mock.MagicMock()
+        def raiser(comm, node, conf):
+            if node.ring0 == nodes[1]:
+                raise NodeAuthenticationException(
+                    nodes[1], "command", "HTTP error: 401"
+                )
+        mock_corosync_live.set_remote_corosync_conf.side_effect = raiser
+
+        lib.distribute_corosync_conf(
+            self.mock_communicator,
+            self.mock_reporter,
+            node_addrs_list,
+            conf_text,
+            skip_offline_nodes=True
+        )
+
+        corosync_live_calls = [
+            mock.call.set_remote_corosync_conf(
+                "mock node communicator", nodes[0], conf_text
+            ),
+            mock.call.set_remote_corosync_conf(
+                "mock node communicator", nodes[1], conf_text
+            ),
+        ]
+        self.assertEqual(
+            len(corosync_live_calls),
+            len(mock_corosync_live.mock_calls)
+        )
+        self.assert_set_remote_corosync_conf_call(
+            mock_corosync_live.mock_calls[0], nodes[0], conf_text
+        )
+        self.assert_set_remote_corosync_conf_call(
+            mock_corosync_live.mock_calls[1], nodes[1], conf_text
+        )
+
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.COROSYNC_CONFIG_DISTRIBUTION_STARTED,
+                    {}
+                ),
+                (
+                    severity.INFO,
+                    report_codes.COROSYNC_CONFIG_ACCEPTED_BY_NODE,
+                    {"node": nodes[0]}
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+                    {
+                        "node": nodes[1],
+                        "command": "command",
+                        "reason" : "HTTP error: 401",
+                    }
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.COROSYNC_CONFIG_DISTRIBUTION_NODE_ERROR,
+                    {
+                        "node": nodes[1],
+                    }
+                ),
+            ]
+        )
+
+class CheckCorosyncOfflineTest(TestCase):
+    def setUp(self):
+        self.mock_reporter = MockLibraryReportProcessor()
+        self.mock_communicator = mock.MagicMock(NodeCommunicator)
+
+    def test_success(self):
+        nodes = ["node1", "node2"]
+        node_addrs_list = NodeAddressesList(
+            [NodeAddresses(addr) for addr in nodes]
+        )
+        self.mock_communicator.call_node.return_value = '{"corosync": false}'
+
+        lib.check_corosync_offline_on_nodes(
+            self.mock_communicator,
+            self.mock_reporter,
+            node_addrs_list
+        )
+
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED,
+                    {}
+                ),
+                (
+                    severity.INFO,
+                    report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
+                    {"node": nodes[0]}
+                ),
+                (
+                    severity.INFO,
+                    report_codes.COROSYNC_NOT_RUNNING_ON_NODE,
+                    {"node": nodes[1]}
+                ),
+            ]
+        )
+
+    def test_one_node_running(self):
+        nodes = ["node1", "node2"]
+        node_addrs_list = NodeAddressesList(
+            [NodeAddresses(addr) for addr in nodes]
+        )
+        self.mock_communicator.call_node.side_effect = [
+            '{"corosync": false}',
+            '{"corosync": true}',
+        ]
+
+        assert_raise_library_error(
+            lambda: lib.check_corosync_offline_on_nodes(
+                self.mock_communicator,
+                self.mock_reporter,
+                node_addrs_list
+            ),
+            (
+                severity.ERROR,
+                report_codes.COROSYNC_RUNNING_ON_NODE,
+                {
+                    "node": nodes[1],
+                }
+            )
+        )
+
+    def test_json_error(self):
+        nodes = ["node1", "node2"]
+        node_addrs_list = NodeAddressesList(
+            [NodeAddresses(addr) for addr in nodes]
+        )
+        self.mock_communicator.call_node.side_effect = [
+            '{}', # missing key
+            '{', # not valid json
+        ]
+
+        assert_raise_library_error(
+            lambda: lib.check_corosync_offline_on_nodes(
+                self.mock_communicator,
+                self.mock_reporter,
+                node_addrs_list
+            ),
+            (
+                severity.ERROR,
+                report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+                {
+                    "node": nodes[0],
+                },
+                report_codes.SKIP_OFFLINE_NODES
+            ),
+            (
+                severity.ERROR,
+                report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+                {
+                    "node": nodes[1],
+                },
+                report_codes.SKIP_OFFLINE_NODES
+            )
+        )
+
+    def test_node_down(self):
+        nodes = ["node1", "node2"]
+        node_addrs_list = NodeAddressesList(
+            [NodeAddresses(addr) for addr in nodes]
+        )
+        def side_effect(node, request, data):
+            if node.ring0 == nodes[1]:
+                raise NodeAuthenticationException(
+                    nodes[1], "command", "HTTP error: 401"
+                )
+            return '{"corosync": false}'
+        self.mock_communicator.call_node.side_effect = side_effect
+
+        assert_raise_library_error(
+            lambda: lib.check_corosync_offline_on_nodes(
+                self.mock_communicator,
+                self.mock_reporter,
+                node_addrs_list
+            ),
+            (
+                severity.ERROR,
+                report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+                {
+                    "node": nodes[1],
+                    "command": "command",
+                    "reason" : "HTTP error: 401",
+                },
+                report_codes.SKIP_OFFLINE_NODES
+            ),
+            (
+                severity.ERROR,
+                report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+                {
+                    "node": nodes[1],
+                },
+                report_codes.SKIP_OFFLINE_NODES
+            )
+        )
+
+    def test_errors_forced(self):
+        nodes = ["node1", "node2"]
+        node_addrs_list = NodeAddressesList(
+            [NodeAddresses(addr) for addr in nodes]
+        )
+        def side_effect(node, request, data):
+            if node.ring0 == nodes[1]:
+                raise NodeAuthenticationException(
+                    nodes[1], "command", "HTTP error: 401"
+                )
+            return '{' # invalid json
+        self.mock_communicator.call_node.side_effect = side_effect
+
+        lib.check_corosync_offline_on_nodes(
+            self.mock_communicator,
+            self.mock_reporter,
+            node_addrs_list,
+            skip_offline_nodes=True
+        )
+
+        assert_report_item_list_equal(
+            self.mock_reporter.report_item_list,
+            [
+                (
+                    severity.INFO,
+                    report_codes.COROSYNC_NOT_RUNNING_CHECK_STARTED,
+                    {}
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+                    {
+                        "node": nodes[0],
+                    }
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.NODE_COMMUNICATION_ERROR_NOT_AUTHORIZED,
+                    {
+                        "node": nodes[1],
+                        "command": "command",
+                        "reason" : "HTTP error: 401",
+                    }
+                ),
+                (
+                    severity.WARNING,
+                    report_codes.COROSYNC_NOT_RUNNING_CHECK_NODE_ERROR,
+                    {
+                        "node": nodes[1],
+                    }
+                )
+            ]
+        )
diff --git a/pcs/test/test_lib_pacemaker.py b/pcs/test/test_lib_pacemaker.py
new file mode 100644
index 0000000..85d2034
--- /dev/null
+++ b/pcs/test/test_lib_pacemaker.py
@@ -0,0 +1,925 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+import os.path
+
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_xml_equal,
+)
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.xml import XmlManipulation
+
+from pcs import settings
+from pcs.common import report_codes
+from pcs.lib import pacemaker as lib
+from pcs.lib.errors import ReportItemSeverity as Severity
+from pcs.lib.external import CommandRunner
+
+
+class LibraryPacemakerTest(TestCase):
+    def path(self, name):
+        return os.path.join(settings.pacemaker_binaries, name)
+
+    def crm_mon_cmd(self):
+        return [self.path("crm_mon"), "--one-shot", "--as-xml", "--inactive"]
+
+class LibraryPacemakerNodeStatusTest(LibraryPacemakerTest):
+    def setUp(self):
+        self.status = XmlManipulation.from_file(rc("crm_mon.minimal.xml"))
+
+    def fixture_get_node_status(self, node_name, node_id):
+        return {
+            "id": node_id,
+            "name": node_name,
+            "type": "member",
+            "online": True,
+            "standby": False,
+            "standby_onfail": False,
+            "maintenance": True,
+            "pending": True,
+            "unclean": False,
+            "shutdown": False,
+            "expected_up": True,
+            "is_dc": True,
+            "resources_running": 7,
+        }
+
+    def fixture_add_node_status(self, node_attrs):
+        xml_attrs = []
+        for name, value in node_attrs.items():
+            if value is True:
+                value = "true"
+            elif value is False:
+                value = "false"
+            xml_attrs.append('{0}="{1}"'.format(name, value))
+        node_xml = "<node {0}/>".format(" ".join(xml_attrs))
+        self.status.append_to_first_tag_name("nodes", node_xml)
+
+class GetClusterStatusXmlTest(LibraryPacemakerTest):
+    def test_success(self):
+        expected_xml = "<xml />"
+        expected_retval = 0
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_xml, expected_retval)
+
+        real_xml = lib.get_cluster_status_xml(mock_runner)
+
+        mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
+        self.assertEqual(expected_xml, real_xml)
+
+    def test_error(self):
+        expected_error = "some error"
+        expected_retval = 1
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_error, expected_retval)
+
+        assert_raise_library_error(
+            lambda: lib.get_cluster_status_xml(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.CRM_MON_ERROR,
+                {
+                    "return_value": expected_retval,
+                    "stdout": expected_error,
+                }
+            )
+        )
+
+        mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
+
+class GetCibXmlTest(LibraryPacemakerTest):
+    def test_success(self):
+        expected_xml = "<xml />"
+        expected_retval = 0
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_xml, expected_retval)
+
+        real_xml = lib.get_cib_xml(mock_runner)
+
+        mock_runner.run.assert_called_once_with(
+            [self.path("cibadmin"), "--local", "--query"]
+        )
+        self.assertEqual(expected_xml, real_xml)
+
+    def test_error(self):
+        expected_error = "some error"
+        expected_retval = 1
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_error, expected_retval)
+
+        assert_raise_library_error(
+            lambda: lib.get_cib_xml(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.CIB_LOAD_ERROR,
+                {
+                    "return_value": expected_retval,
+                    "stdout": expected_error,
+                }
+            )
+        )
+
+        mock_runner.run.assert_called_once_with(
+            [self.path("cibadmin"), "--local", "--query"]
+        )
+
+    def test_success_scope(self):
+        expected_xml = "<xml />"
+        expected_retval = 0
+        scope = "test_scope"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_xml, expected_retval)
+
+        real_xml = lib.get_cib_xml(mock_runner, scope)
+
+        mock_runner.run.assert_called_once_with(
+            [
+                self.path("cibadmin"),
+                "--local", "--query", "--scope={0}".format(scope)
+            ]
+        )
+        self.assertEqual(expected_xml, real_xml)
+
+    def test_scope_error(self):
+        expected_error = "some error"
+        expected_retval = 6
+        scope = "test_scope"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_error, expected_retval)
+
+        assert_raise_library_error(
+            lambda: lib.get_cib_xml(mock_runner, scope=scope),
+            (
+                Severity.ERROR,
+                report_codes.CIB_LOAD_ERROR_SCOPE_MISSING,
+                {
+                    "scope": scope,
+                    "return_value": expected_retval,
+                    "stdout": expected_error,
+                }
+            )
+        )
+
+        mock_runner.run.assert_called_once_with(
+            [
+                self.path("cibadmin"),
+                "--local", "--query", "--scope={0}".format(scope)
+            ]
+        )
+
+class GetCibTest(LibraryPacemakerTest):
+    def test_success(self):
+        xml = "<xml />"
+        assert_xml_equal(xml, str(XmlManipulation((lib.get_cib(xml)))))
+
+    def test_invalid_xml(self):
+        xml = "<invalid><xml />"
+        assert_raise_library_error(
+            lambda: lib.get_cib(xml),
+            (
+                Severity.ERROR,
+                report_codes.CIB_LOAD_ERROR_BAD_FORMAT,
+                {
+                }
+            )
+        )
+
+class ReplaceCibConfigurationTest(LibraryPacemakerTest):
+    def test_success(self):
+        xml = "<xml/>"
+        expected_output = "expected output"
+        expected_retval = 0
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_output, expected_retval)
+
+        lib.replace_cib_configuration(
+            mock_runner,
+            XmlManipulation.from_str(xml).tree
+        )
+
+        mock_runner.run.assert_called_once_with(
+            [
+                self.path("cibadmin"), "--replace", "--scope", "configuration",
+                "--verbose", "--xml-pipe"
+            ],
+            stdin_string=xml
+        )
+
+    def test_error(self):
+        xml = "<xml/>"
+        expected_error = "expected error"
+        expected_retval = 1
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_error, expected_retval)
+
+        assert_raise_library_error(
+            lambda: lib.replace_cib_configuration(
+                    mock_runner,
+                    XmlManipulation.from_str(xml).tree
+                )
+            ,
+            (
+                Severity.ERROR,
+                report_codes.CIB_PUSH_ERROR,
+                {
+                    "return_value": expected_retval,
+                    "stdout": expected_error,
+                }
+            )
+        )
+
+        mock_runner.run.assert_called_once_with(
+            [
+                self.path("cibadmin"), "--replace", "--scope", "configuration",
+                "--verbose", "--xml-pipe"
+            ],
+            stdin_string=xml
+        )
+
+class GetLocalNodeStatusTest(LibraryPacemakerNodeStatusTest):
+    def test_offline(self):
+        expected_error = "some error"
+        expected_retval = 1
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_error, expected_retval)
+
+        self.assertEqual(
+            {"offline": True},
+            lib.get_local_node_status(mock_runner)
+        )
+        mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
+
+    def test_invalid_status(self):
+        expected_xml = "some error"
+        expected_retval = 0
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_xml, expected_retval)
+
+        assert_raise_library_error(
+            lambda: lib.get_local_node_status(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.BAD_CLUSTER_STATE_FORMAT,
+                {}
+            )
+        )
+        mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
+
+    def test_success(self):
+        node_id = "id_1"
+        node_name = "name_1"
+        node_status = self.fixture_get_node_status(node_name, node_id)
+        expected_status = dict(node_status, offline=False)
+        self.fixture_add_node_status(
+            self.fixture_get_node_status("name_2", "id_2")
+        )
+        self.fixture_add_node_status(node_status)
+        self.fixture_add_node_status(
+            self.fixture_get_node_status("name_3", "id_3")
+        )
+
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [
+            mock.call(self.crm_mon_cmd()),
+            mock.call([self.path("crm_node"), "--cluster-id"]),
+            mock.call(
+                [self.path("crm_node"), "--name-for-id={0}".format(node_id)]
+            ),
+        ]
+        return_value_list = [
+            (str(self.status), 0),
+            (node_id, 0),
+            (node_name, 0)
+        ]
+        mock_runner.run.side_effect = return_value_list
+
+        real_status = lib.get_local_node_status(mock_runner)
+
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+        self.assertEqual(expected_status, real_status)
+
+    def test_node_not_in_status(self):
+        node_id = "id_1"
+        node_name = "name_1"
+        node_name_bad = "name_X"
+        node_status = self.fixture_get_node_status(node_name, node_id)
+        self.fixture_add_node_status(node_status)
+
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [
+            mock.call(self.crm_mon_cmd()),
+            mock.call([self.path("crm_node"), "--cluster-id"]),
+            mock.call(
+                [self.path("crm_node"), "--name-for-id={0}".format(node_id)]
+            ),
+        ]
+        return_value_list = [
+            (str(self.status), 0),
+            (node_id, 0),
+            (node_name_bad, 0)
+        ]
+        mock_runner.run.side_effect = return_value_list
+
+        assert_raise_library_error(
+            lambda: lib.get_local_node_status(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.NODE_NOT_FOUND,
+                {"node": node_name_bad}
+            )
+        )
+
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+
+    def test_error_1(self):
+        node_id = "id_1"
+        node_name = "name_1"
+        node_status = self.fixture_get_node_status(node_name, node_id)
+        self.fixture_add_node_status(node_status)
+
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [
+            mock.call(self.crm_mon_cmd()),
+            mock.call([self.path("crm_node"), "--cluster-id"]),
+        ]
+        return_value_list = [
+            (str(self.status), 0),
+            ("some error", 1),
+        ]
+        mock_runner.run.side_effect = return_value_list
+
+        assert_raise_library_error(
+            lambda: lib.get_local_node_status(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND,
+                {"reason": "node id not found"}
+            )
+        )
+
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+
+    def test_error_2(self):
+        """Report PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND when
+        'crm_node --name-for-id' exits nonzero (name lookup fails).
+        """
+        node_id = "id_1"
+        node_name = "name_1"
+        node_status = self.fixture_get_node_status(node_name, node_id)
+        self.fixture_add_node_status(node_status)
+
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [
+            mock.call(self.crm_mon_cmd()),
+            mock.call([self.path("crm_node"), "--cluster-id"]),
+            mock.call(
+                [self.path("crm_node"), "--name-for-id={0}".format(node_id)]
+            ),
+        ]
+        # Id lookup succeeds, name lookup fails on the third call.
+        return_value_list = [
+            (str(self.status), 0),
+            (node_id, 0),
+            ("some error", 1),
+        ]
+        mock_runner.run.side_effect = return_value_list
+
+        assert_raise_library_error(
+            lambda: lib.get_local_node_status(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND,
+                {"reason": "node name not found"}
+            )
+        )
+
+        # Sanity: exactly one return value per expected call, all consumed.
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+
+    def test_error_3(self):
+        """Report PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND when crm_node
+        succeeds but prints the literal "(null)" as the node name.
+        """
+        node_id = "id_1"
+        node_name = "name_1"
+        node_status = self.fixture_get_node_status(node_name, node_id)
+        self.fixture_add_node_status(node_status)
+
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [
+            mock.call(self.crm_mon_cmd()),
+            mock.call([self.path("crm_node"), "--cluster-id"]),
+            mock.call(
+                [self.path("crm_node"), "--name-for-id={0}".format(node_id)]
+            ),
+        ]
+        # All calls succeed, but the resolved name is the "(null)" placeholder.
+        return_value_list = [
+            (str(self.status), 0),
+            (node_id, 0),
+            ("(null)", 0),
+        ]
+        mock_runner.run.side_effect = return_value_list
+
+        assert_raise_library_error(
+            lambda: lib.get_local_node_status(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.PACEMAKER_LOCAL_NODE_NAME_NOT_FOUND,
+                {"reason": "node name is null"}
+            )
+        )
+
+        # Sanity: exactly one return value per expected call, all consumed.
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+
+class ResourceCleanupTest(LibraryPacemakerTest):
+    """Tests for lib.resource_cleanup: the load-threshold guard, the force
+    flag, --resource/--node filtering and error reporting paths.
+    """
+
+    def fixture_status_xml(self, nodes, resources):
+        # Build a crm_mon status document with the given configured
+        # node and resource counts patched into the summary section.
+        xml_man = XmlManipulation.from_file(rc("crm_mon.minimal.xml"))
+        doc = xml_man.tree.getroottree()
+        doc.find("/summary/nodes_configured").set("number", str(nodes))
+        doc.find("/summary/resources_configured").set("number", str(resources))
+        return str(XmlManipulation(doc))
+
+    def test_basic(self):
+        """Small cluster (1 node, 1 resource): cleanup runs and its stdout
+        is returned unchanged.
+        """
+        expected_output = "expected output"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [
+            mock.call(self.crm_mon_cmd()),
+            mock.call([self.path("crm_resource"), "--cleanup"]),
+        ]
+        return_value_list = [
+            (self.fixture_status_xml(1, 1), 0),
+            (expected_output, 0),
+        ]
+        mock_runner.run.side_effect = return_value_list
+
+        real_output = lib.resource_cleanup(mock_runner)
+
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+        self.assertEqual(expected_output, real_output)
+
+    def test_threshold_exceeded(self):
+        """1000 nodes x 1000 resources exceeds the threshold (100):
+        cleanup is refused with a forceable error; no cleanup command runs.
+        """
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (self.fixture_status_xml(1000, 1000), 0)
+
+        assert_raise_library_error(
+            lambda: lib.resource_cleanup(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.RESOURCE_CLEANUP_TOO_TIME_CONSUMING,
+                {"threshold": 100},
+                report_codes.FORCE_LOAD_THRESHOLD
+            )
+        )
+
+        # Only the status check happened; the threshold blocked the cleanup.
+        mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
+
+    def test_forced(self):
+        """force=True skips the status/threshold check entirely."""
+        expected_output = "expected output"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_output, 0)
+
+        real_output = lib.resource_cleanup(mock_runner, force=True)
+
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_resource"), "--cleanup"]
+        )
+        self.assertEqual(expected_output, real_output)
+
+    def test_resource(self):
+        """Naming a resource adds --resource and skips the status check."""
+        resource = "test_resource"
+        expected_output = "expected output"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_output, 0)
+
+        real_output = lib.resource_cleanup(mock_runner, resource=resource)
+
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_resource"), "--cleanup", "--resource", resource]
+        )
+        self.assertEqual(expected_output, real_output)
+
+    def test_node(self):
+        """Naming a node adds --node and skips the status check."""
+        node = "test_node"
+        expected_output = "expected output"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_output, 0)
+
+        real_output = lib.resource_cleanup(mock_runner, node=node)
+
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_resource"), "--cleanup", "--node", node]
+        )
+        self.assertEqual(expected_output, real_output)
+
+    def test_node_and_resource(self):
+        """Resource and node filters combine: --resource before --node."""
+        node = "test_node"
+        resource = "test_resource"
+        expected_output = "expected output"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_output, 0)
+
+        real_output = lib.resource_cleanup(
+            mock_runner, resource=resource, node=node
+        )
+
+        mock_runner.run.assert_called_once_with(
+            [
+                self.path("crm_resource"),
+                "--cleanup", "--resource", resource, "--node", node
+            ]
+        )
+        self.assertEqual(expected_output, real_output)
+
+    def test_error_state(self):
+        """A failing crm_mon status check raises CRM_MON_ERROR with the
+        command's output and return code attached.
+        """
+        expected_error = "some error"
+        expected_retval = 1
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_error, expected_retval)
+
+        assert_raise_library_error(
+            lambda: lib.resource_cleanup(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.CRM_MON_ERROR,
+                {
+                    "return_value": expected_retval,
+                    "stdout": expected_error,
+                }
+            )
+        )
+
+        mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
+
+    def test_error_cleanup(self):
+        """A failing crm_resource --cleanup raises RESOURCE_CLEANUP_ERROR
+        with the command's output and return code attached.
+        """
+        expected_error = "expected error"
+        expected_retval = 1
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [
+            mock.call(self.crm_mon_cmd()),
+            mock.call([self.path("crm_resource"), "--cleanup"]),
+        ]
+        return_value_list = [
+            (self.fixture_status_xml(1, 1), 0),
+            (expected_error, expected_retval),
+        ]
+        mock_runner.run.side_effect = return_value_list
+
+        assert_raise_library_error(
+            lambda: lib.resource_cleanup(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.RESOURCE_CLEANUP_ERROR,
+                {
+                    "return_value": expected_retval,
+                    "stdout": expected_error,
+                }
+            )
+        )
+
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+
+class ResourcesWaitingTest(LibraryPacemakerTest):
+    """Tests for the crm_resource --wait support detection and the
+    wait_for_resources success/error/timeout paths.
+    """
+
+    def test_has_support(self):
+        """--wait appearing in 'crm_resource -?' output means supported.
+        The nonzero return code of the help call is ignored.
+        """
+        expected_output = "something --wait something else"
+        expected_retval = 1
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_output, expected_retval)
+
+        self.assertTrue(
+            lib.has_resource_wait_support(mock_runner)
+        )
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_resource"), "-?"]
+        )
+
+    def test_doesnt_have_support(self):
+        """Help output without --wait means the feature is unsupported."""
+        expected_output = "something something else"
+        expected_retval = 1
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_output, expected_retval)
+
+        self.assertFalse(
+            lib.has_resource_wait_support(mock_runner)
+        )
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_resource"), "-?"]
+        )
+
+    @mock.patch("pcs.lib.pacemaker.has_resource_wait_support", autospec=True)
+    def test_ensure_support_success(self, mock_obj):
+        # When support is detected, ensure_* returns None and raises nothing.
+        mock_obj.return_value = True
+        self.assertEqual(None, lib.ensure_resource_wait_support(mock.Mock()))
+
+    @mock.patch("pcs.lib.pacemaker.has_resource_wait_support", autospec=True)
+    def test_ensure_support_error(self, mock_obj):
+        # Missing support is reported as RESOURCE_WAIT_NOT_SUPPORTED.
+        mock_obj.return_value = False
+        assert_raise_library_error(
+            lambda: lib.ensure_resource_wait_support(mock.Mock()),
+            (
+                Severity.ERROR,
+                report_codes.RESOURCE_WAIT_NOT_SUPPORTED,
+                {}
+            )
+        )
+
+    def test_wait_success(self):
+        """Successful wait returns None and runs plain 'crm_resource --wait'."""
+        expected_output = "expected output"
+        expected_retval = 0
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_output, expected_retval)
+
+        self.assertEqual(None, lib.wait_for_resources(mock_runner))
+
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_resource"), "--wait"]
+        )
+
+    def test_wait_timeout_success(self):
+        """A timeout argument is passed through as --timeout=<seconds>."""
+        expected_output = "expected output"
+        expected_retval = 0
+        timeout = 10
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_output, expected_retval)
+
+        self.assertEqual(None, lib.wait_for_resources(mock_runner, timeout))
+
+        mock_runner.run.assert_called_once_with(
+            [
+                self.path("crm_resource"),
+                "--wait", "--timeout={0}".format(timeout)
+            ]
+        )
+
+    def test_wait_error(self):
+        """A generic nonzero return code raises RESOURCE_WAIT_ERROR."""
+        expected_error = "some error"
+        expected_retval = 1
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_error, expected_retval)
+
+        assert_raise_library_error(
+            lambda: lib.wait_for_resources(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.RESOURCE_WAIT_ERROR,
+                {
+                    "return_value": expected_retval,
+                    "stdout": expected_error,
+                }
+            )
+        )
+
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_resource"), "--wait"]
+        )
+
+    def test_wait_error_timeout(self):
+        """Return code 62 is mapped to RESOURCE_WAIT_TIMED_OUT rather than
+        the generic wait error.
+        """
+        expected_error = "some error"
+        expected_retval = 62
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_error, expected_retval)
+
+        assert_raise_library_error(
+            lambda: lib.wait_for_resources(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.RESOURCE_WAIT_TIMED_OUT,
+                {
+                    "return_value": expected_retval,
+                    "stdout": expected_error,
+                }
+            )
+        )
+
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_resource"), "--wait"]
+        )
+
+class NodeStandbyTest(LibraryPacemakerNodeStatusTest):
+    """Tests for lib.nodes_standby / lib.nodes_unstandby: local node,
+    all nodes, an explicit node list, unknown nodes and command failures.
+    """
+
+    def test_standby_local(self):
+        """No node arguments: standby the local node via 'crm_standby -v on'."""
+        expected_retval = 0
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("dummy", expected_retval)
+
+        output = lib.nodes_standby(mock_runner)
+
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_standby"), "-v", "on"]
+        )
+        self.assertEqual(None, output)
+
+    def test_unstandby_local(self):
+        """No node arguments: unstandby the local node via 'crm_standby -D'."""
+        expected_retval = 0
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("dummy", expected_retval)
+
+        output = lib.nodes_unstandby(mock_runner)
+
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_standby"), "-D"]
+        )
+        self.assertEqual(None, output)
+
+    def test_standby_all(self):
+        """all_nodes=True: read cluster status, then standby every node."""
+        nodes = ("node1", "node2", "node3")
+        for i, n in enumerate(nodes, 1):
+            self.fixture_add_node_status(
+                self.fixture_get_node_status(n, i)
+            )
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        # One status call followed by one crm_standby call per node.
+        call_list = [mock.call(self.crm_mon_cmd())]
+        call_list += [
+            mock.call([self.path("crm_standby"), "-v", "on", "-N", n])
+            for n in nodes
+        ]
+        return_value_list = [(str(self.status), 0)]
+        return_value_list += [("dummy", 0) for n in nodes]
+        mock_runner.run.side_effect = return_value_list
+
+        output = lib.nodes_standby(mock_runner, all_nodes=True)
+
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+        self.assertEqual(None, output)
+
+    def test_unstandby_all(self):
+        """all_nodes=True: read cluster status, then unstandby every node."""
+        nodes = ("node1", "node2", "node3")
+        for i, n in enumerate(nodes, 1):
+            self.fixture_add_node_status(
+                self.fixture_get_node_status(n, i)
+            )
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [mock.call(self.crm_mon_cmd())]
+        call_list += [
+            mock.call([self.path("crm_standby"), "-D", "-N", n])
+            for n in nodes
+        ]
+        return_value_list = [(str(self.status), 0)]
+        return_value_list += [("dummy", 0) for n in nodes]
+        mock_runner.run.side_effect = return_value_list
+
+        output = lib.nodes_unstandby(mock_runner, all_nodes=True)
+
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+        self.assertEqual(None, output)
+
+    def test_standby_nodes(self):
+        """node_list: only the listed nodes (here nodes[1:]) are put in
+        standby after the status check validates them.
+        """
+        nodes = ("node1", "node2", "node3")
+        for i, n in enumerate(nodes, 1):
+            self.fixture_add_node_status(
+                self.fixture_get_node_status(n, i)
+            )
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [mock.call(self.crm_mon_cmd())]
+        call_list += [
+            mock.call([self.path("crm_standby"), "-v", "on", "-N", n])
+            for n in nodes[1:]
+        ]
+        return_value_list = [(str(self.status), 0)]
+        return_value_list += [("dummy", 0) for n in nodes[1:]]
+        mock_runner.run.side_effect = return_value_list
+
+        output = lib.nodes_standby(mock_runner, node_list=nodes[1:])
+
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+        self.assertEqual(None, output)
+
+    def test_unstandby_nodes(self):
+        """node_list: only the listed nodes (here nodes[:2]) are taken out
+        of standby after the status check validates them.
+        """
+        nodes = ("node1", "node2", "node3")
+        for i, n in enumerate(nodes, 1):
+            self.fixture_add_node_status(
+                self.fixture_get_node_status(n, i)
+            )
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [mock.call(self.crm_mon_cmd())]
+        call_list += [
+            mock.call([self.path("crm_standby"), "-D", "-N", n])
+            for n in nodes[:2]
+        ]
+        return_value_list = [(str(self.status), 0)]
+        return_value_list += [("dummy", 0) for n in nodes[:2]]
+        mock_runner.run.side_effect = return_value_list
+
+        output = lib.nodes_unstandby(mock_runner, node_list=nodes[:2])
+
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+        self.assertEqual(None, output)
+
+    def test_standby_unknown_node(self):
+        """A node missing from the cluster status yields NODE_NOT_FOUND
+        and no crm_standby command is attempted.
+        """
+        self.fixture_add_node_status(
+            self.fixture_get_node_status("node_1", "id_1")
+        )
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (str(self.status), 0)
+
+        assert_raise_library_error(
+            lambda: lib.nodes_standby(mock_runner, ["node_2"]),
+            (
+                Severity.ERROR,
+                report_codes.NODE_NOT_FOUND,
+                {"node": "node_2"}
+            )
+        )
+
+        mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
+
+    def test_unstandby_unknown_node(self):
+        """Each unknown node produces its own NODE_NOT_FOUND report."""
+        self.fixture_add_node_status(
+            self.fixture_get_node_status("node_1", "id_1")
+        )
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (str(self.status), 0)
+
+        assert_raise_library_error(
+            lambda: lib.nodes_unstandby(mock_runner, ["node_2", "node_3"]),
+            (
+                Severity.ERROR,
+                report_codes.NODE_NOT_FOUND,
+                {"node": "node_2"}
+            ),
+            (
+                Severity.ERROR,
+                report_codes.NODE_NOT_FOUND,
+                {"node": "node_3"}
+            )
+        )
+
+        mock_runner.run.assert_called_once_with(self.crm_mon_cmd())
+
+    def test_error_one_node(self):
+        """A failing crm_standby call is reported as COMMON_ERROR."""
+        expected_error = "some error"
+        expected_retval = 1
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (expected_error, expected_retval)
+
+        assert_raise_library_error(
+            lambda: lib.nodes_unstandby(mock_runner),
+            (
+                Severity.ERROR,
+                report_codes.COMMON_ERROR,
+                {}
+            )
+        )
+
+        mock_runner.run.assert_called_once_with(
+            [self.path("crm_standby"), "-D"]
+        )
+
+    def test_error_some_nodes(self):
+        """Per-node failures accumulate: all nodes are still attempted and
+        one COMMON_ERROR is reported for each failed crm_standby call.
+        """
+        nodes = ("node1", "node2", "node3", "node4")
+        for i, n in enumerate(nodes, 1):
+            self.fixture_add_node_status(
+                self.fixture_get_node_status(n, i)
+            )
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        call_list = [mock.call(self.crm_mon_cmd())]
+        call_list += [
+            mock.call([self.path("crm_standby"), "-v", "on", "-N", n])
+            for n in nodes
+        ]
+        # node2 and node4 fail (retval 1); node1 and node3 succeed.
+        return_value_list = [
+            (str(self.status), 0),
+            ("dummy1", 0),
+            ("dummy2", 1),
+            ("dummy3", 0),
+            ("dummy4", 1),
+        ]
+        mock_runner.run.side_effect = return_value_list
+
+        assert_raise_library_error(
+            lambda: lib.nodes_standby(mock_runner, all_nodes=True),
+            (
+                Severity.ERROR,
+                report_codes.COMMON_ERROR,
+                {}
+            ),
+            (
+                Severity.ERROR,
+                report_codes.COMMON_ERROR,
+                {}
+            )
+        )
+
+        self.assertEqual(len(return_value_list), len(call_list))
+        self.assertEqual(len(return_value_list), mock_runner.run.call_count)
+        mock_runner.run.assert_has_calls(call_list)
+
diff --git a/pcs/test/test_lib_pacemaker_state.py b/pcs/test/test_lib_pacemaker_state.py
new file mode 100644
index 0000000..54f536d
--- /dev/null
+++ b/pcs/test/test_lib_pacemaker_state.py
@@ -0,0 +1,154 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+from lxml import etree
+
+from pcs.test.tools.assertions import assert_raise_library_error
+from pcs.test.tools.misc import get_test_resource as rc
+from pcs.test.tools.xml import get_xml_manipulation_creator_from_file
+
+from pcs.lib.pacemaker_state import (
+    ClusterState,
+    _Attrs,
+    _Children,
+)
+
+from pcs.common import report_codes
+from pcs.lib.errors import ReportItemSeverity as severities
+
+class AttrsTest(TestCase):
+    """Tests for the _Attrs wrapper: declared-attribute lookup, failure on
+    undeclared/missing attributes, and value transformation via a callable.
+    """
+
+    def test_get_declared_attr(self):
+        # Declaration maps the python name 'name' to xml attribute 'node-name'.
+        attrs = _Attrs('test', {'node-name': 'node1'}, {'name': 'node-name'})
+        self.assertEqual('node1', attrs.name)
+
+    def test_raises_on_undeclared_attribute(self):
+        # Attribute present in the data but not declared -> AttributeError.
+        attrs = _Attrs('test', {'node-name': 'node1'}, {})
+        self.assertRaises(AttributeError, lambda: attrs.name)
+
+    def test_raises_on_missing_required_attribute(self):
+        # Declared but absent from the data -> AttributeError.
+        attrs = _Attrs('test', {}, {'name': 'node-name'})
+        self.assertRaises(AttributeError, lambda: attrs.name)
+
+    def test_attr_transformation_success(self):
+        # A (name, callable) declaration converts the raw string value.
+        attrs = _Attrs('test', {'number': '7'}, {'count': ('number', int)})
+        self.assertEqual(7, attrs.count)
+
+    def test_attr_transformation_fail(self):
+        # The transformation callable's exception propagates to the caller.
+        attrs = _Attrs('test', {'number': 'abc'}, {'count': ('number', int)})
+        self.assertRaises(ValueError, lambda: attrs.count)
+
+class ChildrenTest(TestCase):
+    """Tests for the _Children wrapper: single-section access, child-list
+    access, and failure on undeclared names.
+    """
+
+    def setUp(self):
+        # One 'some' element and two 'any' elements to exercise both
+        # single-section and list access.
+        self.dom = etree.fromstring(
+            '<main><some name="0"/><any name="1"/><any name="2"/></main>'
+        )
+
+    def wrap(self, element):
+        # Simple wrapper used in declarations: "tag.name" for easy asserts.
+        return '{0}.{1}'.format(element.tag, element.attrib['name'])
+
+    def test_get_declared_section(self):
+        # A section declaration returns the single wrapped element.
+        children = _Children(
+            'test', self.dom, {}, {'some_section': ('some', self.wrap)}
+        )
+        self.assertEqual('some.0', children.some_section)
+
+    def test_get_declared_children(self):
+        # A children declaration returns a list of all wrapped matches.
+        children = _Children('test', self.dom, {'anys': ('any', self.wrap)}, {})
+        self.assertEqual(['any.1', 'any.2'], children.anys)
+
+    def test_raises_on_undeclared_children(self):
+        children = _Children('test', self.dom, {}, {})
+        self.assertRaises(AttributeError, lambda: children.some_section)
+
+
+class TestBase(TestCase):
+    """Shared fixture: loads the minimal crm_mon XML and provides a fresh
+    manipulable copy (self.covered_status) per test.
+    """
+
+    def setUp(self):
+        self.create_covered_status = get_xml_manipulation_creator_from_file(
+            rc('crm_mon.minimal.xml')
+        )
+        self.covered_status = self.create_covered_status()
+
+class ClusterStatusTest(TestBase):
+    """ClusterState construction: accepts the minimal fixture, rejects
+    non-XML input and schema-invalid documents.
+    """
+
+    def test_minimal_crm_mon_is_valid(self):
+        # Constructing from the untouched fixture must not raise.
+        ClusterState(str(self.covered_status))
+
+    def test_refuse_invalid_xml(self):
+        assert_raise_library_error(
+            lambda: ClusterState('invalid xml'),
+            (severities.ERROR, report_codes.BAD_CLUSTER_STATE_FORMAT, {})
+        )
+
+    def test_refuse_invalid_document(self):
+        # Well-formed XML that fails validation (node lacks required
+        # attributes) is reported the same way as unparsable input.
+        self.covered_status.append_to_first_tag_name(
+            'nodes',
+            '<node without="required attributes" />'
+        )
+
+        assert_raise_library_error(
+            lambda: ClusterState(str(self.covered_status)),
+            (severities.ERROR, report_codes.BAD_CLUSTER_STATE_FORMAT, {})
+        )
+        )
+
+
+class WorkWithClusterStatusNodesTest(TestBase):
+    """Reading node information out of a ClusterState built from crm_mon
+    XML: name access and filtering by node type.
+    """
+
+    def fixture_node_string(self, **kwargs):
+        # Render a <node/> element with every attribute the schema needs;
+        # kwargs override the defaults (name, id, type).
+        attrs = dict(name='name', id='id', type='member')
+        attrs.update(kwargs)
+        return '''<node
+            name="{name}"
+            id="{id}"
+            online="true"
+            standby="true"
+            standby_onfail="false"
+            maintenance="false"
+            pending="false"
+            unclean="false"
+            shutdown="false"
+            expected_up="false"
+            is_dc="false"
+            resources_running="0"
+            type="{type}"
+        />'''.format(**attrs)
+
+    def test_can_get_node_names(self):
+        self.covered_status.append_to_first_tag_name(
+            'nodes',
+            self.fixture_node_string(name='node1', id='1'),
+            self.fixture_node_string(name='node2', id='2'),
+        )
+        xml = str(self.covered_status)
+        self.assertEqual(
+            ['node1', 'node2'],
+            [node.attrs.name for node in ClusterState(xml).node_section.nodes]
+        )
+
+    def test_can_filter_out_remote_nodes(self):
+        self.covered_status.append_to_first_tag_name(
+            'nodes',
+            self.fixture_node_string(name='node1', id='1'),
+            self.fixture_node_string(name='node2', type='remote', id='2'),
+        )
+        xml = str(self.covered_status)
+        # Filtering is done by the caller via node.attrs.type, not by the
+        # ClusterState object itself.
+        self.assertEqual(
+            ['node1'],
+            [
+                node.attrs.name
+                for node in ClusterState(xml).node_section.nodes
+                if node.attrs.type != 'remote'
+            ]
+        )
+
+
+class WorkWithClusterStatusSummaryTest(TestBase):
+    """Summary counters of the minimal fixture are exposed as ints and
+    default to zero.
+    """
+
+    def test_nodes_count(self):
+        xml = str(self.covered_status)
+        self.assertEqual(0, ClusterState(xml).summary.nodes.attrs.count)
+
+    def test_resources_count(self):
+        xml = str(self.covered_status)
+        self.assertEqual(0, ClusterState(xml).summary.resources.attrs.count)
diff --git a/pcs/test/test_lib_resource_agent.py b/pcs/test/test_lib_resource_agent.py
new file mode 100644
index 0000000..707df55
--- /dev/null
+++ b/pcs/test/test_lib_resource_agent.py
@@ -0,0 +1,986 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from unittest import TestCase
+import os.path
+
+from lxml import etree
+
+from pcs.test.tools.assertions import (
+    assert_raise_library_error,
+    assert_xml_equal,
+)
+from pcs.test.tools.pcs_mock import mock
+from pcs.test.tools.xml import XmlManipulation as XmlMan
+
+
+from pcs import settings
+from pcs.common import report_codes
+from pcs.lib import resource_agent as lib_ra
+from pcs.lib.errors import ReportItemSeverity as Severities
+from pcs.lib.external import CommandRunner
+
+
+class LibraryResourceTest(TestCase):
+    # Common base for resource-agent library tests. Currently holds no
+    # shared fixtures; kept as an extension point for the subclasses below.
+    pass
+
+
+class GetParameterTest(LibraryResourceTest):
+    """Tests for lib_ra._get_parameter: parsing one <parameter> element of
+    agent metadata into a dict, including defaults and invalid input.
+    """
+
+    def test_with_all_data(self):
+        """All sub-elements present: every field is taken from the XML and
+        longdesc/shortdesc text is stripped of surrounding whitespace.
+        """
+        xml = """
+            <parameter name="test_param" required="1">
+                <longdesc>
+                    Long description
+                </longdesc>
+                <shortdesc>short description</shortdesc>
+                <content type="test_type" default="default_value" />
+            </parameter>
+        """
+        self.assertEqual(
+            {
+                "name": "test_param",
+                "longdesc": "Long description",
+                "shortdesc": "short description",
+                "type": "test_type",
+                "required": True,
+                "default": "default_value"
+            },
+            lib_ra._get_parameter(etree.XML(xml))
+        )
+
+    def test_minimal_data(self):
+        """Only a name: descriptions default to "", type to "string",
+        required to False and default to None.
+        """
+        xml = '<parameter name="test_param" />'
+        self.assertEqual(
+            {
+                "name": "test_param",
+                "longdesc": "",
+                "shortdesc": "",
+                "type": "string",
+                "required": False,
+                "default": None
+            },
+            lib_ra._get_parameter(etree.XML(xml))
+        )
+
+    def test_no_name(self):
+        """A <parameter> without a name is invalid metadata (forceable)."""
+        xml = '<parameter />'
+        assert_raise_library_error(
+            lambda: lib_ra._get_parameter(etree.XML(xml)),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_METADATA_FORMAT,
+                {},
+                True
+            )
+        )
+
+    def test_invalid_element(self):
+        """A wrong tag name (<param> instead of <parameter>) is rejected
+        even when its content is otherwise complete.
+        """
+        xml = """
+            <param name="test_param" required="1">
+                <longdesc>
+                    Long description
+                </longdesc>
+                <shortdesc>short description</shortdesc>
+                <content type="test_type" default="default_value" />
+            </param>
+        """
+        assert_raise_library_error(
+            lambda: lib_ra._get_parameter(etree.XML(xml)),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_METADATA_FORMAT,
+                {},
+                True
+            )
+        )
+
+
+class GetAgentParametersTest(LibraryResourceTest):
+    """Tests for lib_ra._get_agent_parameters: extracting the full list of
+    parameter dicts from a <resource-agent> metadata document.
+    """
+
+    def test_all_data(self):
+        """Parameters are returned in document order; per-parameter parsing
+        matches _get_parameter (full data and minimal-data defaults).
+        """
+        xml = """
+            <resource-agent>
+                <parameters>
+                    <parameter name="test_param" required="1">
+                        <longdesc>
+                            Long description
+                        </longdesc>
+                        <shortdesc>short description</shortdesc>
+                        <content type="test_type" default="default_value" />
+                    </parameter>
+                    <parameter name="another parameter"/>
+                </parameters>
+            </resource-agent>
+        """
+        self.assertEqual(
+            [
+                {
+                    "name": "test_param",
+                    "longdesc": "Long description",
+                    "shortdesc": "short description",
+                    "type": "test_type",
+                    "required": True,
+                    "default": "default_value"
+                },
+                {
+                    "name": "another parameter",
+                    "longdesc": "",
+                    "shortdesc": "",
+                    "type": "string",
+                    "required": False,
+                    "default": None
+                }
+            ],
+            lib_ra._get_agent_parameters(etree.XML(xml))
+        )
+
+    def test_empty_parameters(self):
+        # An empty <parameters/> section yields an empty list, not an error.
+        xml = """
+            <resource-agent>
+                <parameters />
+            </resource-agent>
+        """
+        self.assertEqual(0, len(lib_ra._get_agent_parameters(etree.XML(xml))))
+
+    def test_no_parameters(self):
+        # A missing <parameters> section also yields an empty list.
+        xml = """
+            <resource-agent>
+                <longdesc />
+            </resource-agent>
+        """
+        self.assertEqual(0, len(lib_ra._get_agent_parameters(etree.XML(xml))))
+
+    def test_invalid_format(self):
+        """An invalid child <parameter> (no name) propagates the forceable
+        INVALID_METADATA_FORMAT error from _get_parameter.
+        """
+        xml = """
+            <resource-agent>
+                <parameters>
+                    <parameter />
+                </parameters>
+            </resource-agent>
+        """
+        assert_raise_library_error(
+            lambda: lib_ra._get_agent_parameters(etree.XML(xml)),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_METADATA_FORMAT,
+                {},
+                True
+            )
+        )
+
+
+class GetFenceAgentMetadataTest(LibraryResourceTest):
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_invalid_agent_name(self, mock_obj):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_obj.return_value = True
+        agent_name = "agent"
+
+        assert_raise_library_error(
+            lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESOURCE_NAME,
+                {"agent_name": agent_name},
+                True
+            )
+        )
+
+        mock_runner.run.assert_not_called()
+
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_relative_path_name(self, mock_obj):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_obj.return_value = True
+        agent_name = "fence_agent/../fence"
+
+        assert_raise_library_error(
+            lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESOURCE_NAME,
+                {"agent_name": agent_name},
+                True
+            )
+        )
+
+        mock_runner.run.assert_not_called()
+
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_not_runnable(self, mock_obj):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_obj.return_value = False
+        agent_name = "fence_agent"
+
+        assert_raise_library_error(
+            lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESOURCE_NAME,
+                {"agent_name": agent_name},
+                True
+            )
+        )
+
+        mock_runner.run.assert_not_called()
+
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_execution_failed(self, mock_is_runnable):
+        mock_is_runnable.return_value = True
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("", 1)
+        agent_name = "fence_ipmi"
+
+        assert_raise_library_error(
+            lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
+            (
+                Severities.ERROR,
+                report_codes.UNABLE_TO_GET_AGENT_METADATA,
+                {"agent_name": agent_name},
+                True
+            )
+        )
+
+        script_path = os.path.join(settings.fence_agent_binaries, agent_name)
+        mock_runner.run.assert_called_once_with(
+            [script_path, "-o", "metadata"], ignore_stderr=True
+        )
+
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_invalid_xml(self, mock_is_runnable):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("not xml", 0)
+        mock_is_runnable.return_value = True
+        agent_name = "fence_ipmi"
+
+        assert_raise_library_error(
+            lambda: lib_ra.get_fence_agent_metadata(mock_runner, agent_name),
+            (
+                Severities.ERROR,
+                report_codes.UNABLE_TO_GET_AGENT_METADATA,
+                {"agent_name": agent_name},
+                True
+            )
+        )
+
+        script_path = os.path.join(settings.fence_agent_binaries, agent_name)
+        mock_runner.run.assert_called_once_with(
+            [script_path, "-o", "metadata"], ignore_stderr=True
+        )
+
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_success(self, mock_is_runnable):
+        agent_name = "fence_ipmi"
+        xml = "<xml />"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (xml, 0)
+        mock_is_runnable.return_value = True
+
+        out_dom = lib_ra.get_fence_agent_metadata(mock_runner, agent_name)
+
+        script_path = os.path.join(settings.fence_agent_binaries, agent_name)
+        mock_runner.run.assert_called_once_with(
+            [script_path, "-o", "metadata"], ignore_stderr=True
+        )
+        assert_xml_equal(xml, str(XmlMan(out_dom)))
+
+
+class GetOcfResourceAgentMetadataTest(LibraryResourceTest):
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_relative_path_provider(self, mock_is_runnable):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_is_runnable.return_value = True
+        provider = "provider/../provider2"
+        agent = "agent"
+
+        assert_raise_library_error(
+            lambda: lib_ra._get_ocf_resource_agent_metadata(
+                mock_runner, provider, agent
+            ),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESOURCE_NAME,
+                {"agent_name": "ocf:{0}:{1}".format(provider, agent)},
+                True
+            )
+        )
+
+        mock_runner.run.assert_not_called()
+
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_relative_path_agent(self, mock_is_runnable):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_is_runnable.return_value = True
+        provider = "provider"
+        agent = "agent/../agent2"
+
+        assert_raise_library_error(
+            lambda: lib_ra._get_ocf_resource_agent_metadata(
+                mock_runner, provider, agent
+            ),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESOURCE_NAME,
+                {"agent_name": "ocf:{0}:{1}".format(provider, agent)},
+                True
+            )
+        )
+
+        mock_runner.run.assert_not_called()
+
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_not_runnable(self, mock_is_runnable):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_is_runnable.return_value = False
+        provider = "provider"
+        agent = "agent"
+
+        assert_raise_library_error(
+            lambda: lib_ra._get_ocf_resource_agent_metadata(
+                mock_runner, provider, agent
+            ),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESOURCE_NAME,
+                {"agent_name": "ocf:{0}:{1}".format(provider, agent)},
+                True
+            )
+        )
+
+        mock_runner.run.assert_not_called()
+
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_execution_failed(self, mock_is_runnable):
+        provider = "provider"
+        agent = "agent"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("", 1)
+        mock_is_runnable.return_value = True
+
+        assert_raise_library_error(
+            lambda: lib_ra._get_ocf_resource_agent_metadata(
+                mock_runner, provider, agent
+            ),
+            (
+                Severities.ERROR,
+                report_codes.UNABLE_TO_GET_AGENT_METADATA,
+                {"agent_name": "ocf:{0}:{1}".format(provider, agent)},
+                True
+            )
+        )
+
+        script_path = os.path.join(settings.ocf_resources, provider, agent)
+        mock_runner.run.assert_called_once_with(
+            [script_path, "meta-data"],
+            env_extend={"OCF_ROOT": settings.ocf_root},
+            ignore_stderr=True
+        )
+
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_invalid_xml(self, mock_is_runnable):
+        provider = "provider"
+        agent = "agent"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("not xml", 0)
+        mock_is_runnable.return_value = True
+
+        assert_raise_library_error(
+            lambda: lib_ra._get_ocf_resource_agent_metadata(
+                mock_runner, provider, agent
+            ),
+            (
+                Severities.ERROR,
+                report_codes.UNABLE_TO_GET_AGENT_METADATA,
+                {"agent_name": "ocf:{0}:{1}".format(provider, agent)},
+                True
+            )
+        )
+
+        script_path = os.path.join(settings.ocf_resources, provider, agent)
+        mock_runner.run.assert_called_once_with(
+            [script_path, "meta-data"],
+            env_extend={"OCF_ROOT": settings.ocf_root},
+            ignore_stderr=True
+        )
+
+    @mock.patch("pcs.lib.resource_agent.is_path_runnable")
+    def test_success(self, mock_is_runnable):
+        provider = "provider"
+        agent = "agent"
+        xml = "<xml />"
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (xml, 0)
+        mock_is_runnable.return_value = True
+
+        out_dom = lib_ra._get_ocf_resource_agent_metadata(
+            mock_runner, provider, agent
+        )
+
+        script_path = os.path.join(settings.ocf_resources, provider, agent)
+        mock_runner.run.assert_called_once_with(
+            [script_path, "meta-data"],
+            env_extend={"OCF_ROOT": settings.ocf_root},
+            ignore_stderr=True
+        )
+        assert_xml_equal(xml, str(XmlMan(out_dom)))
+
+
+class GetNagiosResourceAgentMetadataTest(LibraryResourceTest):
+    def test_relative_path_name(self):
+        agent = "agent/../agent2"
+        assert_raise_library_error(
+            lambda: lib_ra._get_nagios_resource_agent_metadata(agent),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_RESOURCE_NAME,
+                {"agent_name": "nagios:" + agent},
+                True
+            )
+        )
+
+    @mock.patch("lxml.etree.parse")
+    def test_file_opening_exception(self, mock_obj):
+        agent = "agent"
+        mock_obj.side_effect = IOError()
+        assert_raise_library_error(
+            lambda: lib_ra._get_nagios_resource_agent_metadata(agent),
+            (
+                Severities.ERROR,
+                report_codes.UNABLE_TO_GET_AGENT_METADATA,
+                {"agent_name": "nagios:" + agent},
+                True
+            )
+        )
+
+    @mock.patch("lxml.etree.parse")
+    def test_invalid_xml(self, mock_obj):
+        agent = "agent"
+        mock_obj.side_effect = etree.XMLSyntaxError(None, None, None, None)
+        assert_raise_library_error(
+            lambda: lib_ra._get_nagios_resource_agent_metadata(agent),
+            (
+                Severities.ERROR,
+                report_codes.UNABLE_TO_GET_AGENT_METADATA,
+                {"agent_name": "nagios:" + agent},
+                True
+            )
+        )
+
+    @mock.patch("lxml.etree.parse")
+    def test_success(self, mock_obj):
+        agent = "agent"
+        xml = "<xml />"
+        mock_obj.return_value = etree.ElementTree(etree.XML(xml))
+        out_dom = lib_ra._get_nagios_resource_agent_metadata(agent)
+        metadata_path = os.path.join(
+            settings.nagios_metadata_path, agent + ".xml"
+        )
+
+        mock_obj.assert_called_once_with(metadata_path)
+        assert_xml_equal(xml, str(XmlMan(out_dom)))
+
+
+class GetAgentDescTest(LibraryResourceTest):
+    def test_invalid_metadata_format(self):
+        xml = "<xml />"
+        assert_raise_library_error(
+            lambda: lib_ra.get_agent_desc(etree.XML(xml)),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_METADATA_FORMAT,
+                {},
+                True
+            )
+        )
+
+    def test_no_desc(self):
+        xml = "<resource-agent />"
+        expected = {
+            "longdesc": "",
+            "shortdesc": ""
+        }
+        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
+
+    def test_shortdesc_attribute(self):
+        xml = '<resource-agent shortdesc="short description" />'
+        expected = {
+            "longdesc": "",
+            "shortdesc": "short description"
+        }
+        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
+
+    def test_shortdesc_element(self):
+        xml = """
+            <resource-agent>
+                <shortdesc>short description</shortdesc>
+            </resource-agent>
+        """
+        expected = {
+            "longdesc": "",
+            "shortdesc": "short description"
+        }
+        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
+
+    def test_longdesc(self):
+        xml = """
+            <resource-agent>
+                <longdesc>long description</longdesc>
+            </resource-agent>
+        """
+        expected = {
+            "longdesc": "long description",
+            "shortdesc": ""
+        }
+        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
+
+    def test_longdesc_and_shortdesc_attribute(self):
+        xml = """
+            <resource-agent shortdesc="short_desc">
+                <longdesc>long description</longdesc>
+            </resource-agent>
+        """
+        expected = {
+            "longdesc": "long description",
+            "shortdesc": "short_desc"
+        }
+        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
+
+    def test_longdesc_and_shortdesc_element(self):
+        xml = """
+            <resource-agent>
+                <shortdesc>short_desc</shortdesc>
+                <longdesc>long description</longdesc>
+            </resource-agent>
+        """
+        expected = {
+            "longdesc": "long description",
+            "shortdesc": "short_desc"
+        }
+        self.assertEqual(expected, lib_ra.get_agent_desc(etree.XML(xml)))
+
+
+class FilterFenceAgentParametersTest(LibraryResourceTest):
+    def test_filter(self):
+        params = [
+            {"name": "debug"},
+            {"name": "valid_param"},
+            {"name": "verbose"},
+            {"name": "help"},
+            {"name": "action"},
+            {"name": "another_param"},
+            {"name": "version"},
+        ]
+        self.assertEqual(
+            [
+                {"name": "valid_param"},
+                {"name": "another_param"}
+            ],
+            lib_ra._filter_fence_agent_parameters(params)
+        )
+
+
+class GetResourceAgentMetadata(LibraryResourceTest):
+    def test_unsupported_class(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        agent = "class:provider:agent"
+
+        assert_raise_library_error(
+            lambda: lib_ra.get_resource_agent_metadata(mock_runner, agent),
+            (
+                Severities.ERROR,
+                report_codes.UNSUPPORTED_RESOURCE_AGENT,
+                {},
+                True
+            )
+        )
+
+        mock_runner.run.assert_not_called()
+
+    def test_ocf_no_provider(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        agent = "ocf:agent"
+
+        assert_raise_library_error(
+            lambda: lib_ra.get_resource_agent_metadata(mock_runner, agent),
+            (
+                Severities.ERROR,
+                report_codes.UNSUPPORTED_RESOURCE_AGENT,
+                {},
+                True
+            )
+        )
+
+        mock_runner.run.assert_not_called()
+
+    @mock.patch("pcs.lib.resource_agent._get_ocf_resource_agent_metadata")
+    def test_ocf_ok(self, mock_obj):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        agent = "ocf:provider:agent"
+
+        lib_ra.get_resource_agent_metadata(mock_runner, agent)
+
+        mock_obj.assert_called_once_with(mock_runner, "provider", "agent")
+
+    @mock.patch("pcs.lib.resource_agent._get_nagios_resource_agent_metadata")
+    def test_nagios_ok(self, mock_obj):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        agent = "nagios:agent"
+
+        lib_ra.get_resource_agent_metadata(mock_runner, agent)
+
+        mock_obj.assert_called_once_with("agent")
+        mock_runner.run.assert_not_called()
+
+
+class GetPcmkAdvancedStonithParametersTest(LibraryResourceTest):
+    def test_all_advanced(self):
+        xml = """
+            <resource-agent>
+                <parameters>
+                    <parameter name="test_param" required="0">
+                        <longdesc>
+                             Long description
+                        </longdesc>
+                        <shortdesc>
+                             Advanced use only: short description
+                        </shortdesc>
+                        <content type="test_type" default="default_value" />
+                    </parameter>
+                    <parameter name="another parameter"/>
+                </parameters>
+            </resource-agent>
+        """
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = (xml, 0)
+        self.assertEqual(
+            [
+                {
+                    "name": "test_param",
+                    "longdesc":
+                        "Advanced use only: short description\nLong "
+                        "description",
+                    "shortdesc": "Advanced use only: short description",
+                    "type": "test_type",
+                    "required": False,
+                    "default": "default_value",
+                    "advanced": True
+                },
+                {
+                    "name": "another parameter",
+                    "longdesc": "",
+                    "shortdesc": "",
+                    "type": "string",
+                    "required": False,
+                    "default": None,
+                    "advanced": False
+                }
+            ],
+            lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner)
+        )
+        mock_runner.run.assert_called_once_with(
+            [settings.stonithd_binary, "metadata"], ignore_stderr=True
+        )
+
+    def test_failed_to_get_xml(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("", 1)
+        assert_raise_library_error(
+            lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner),
+            (
+                Severities.ERROR,
+                report_codes.UNABLE_TO_GET_AGENT_METADATA,
+                {},
+                True
+            )
+        )
+        mock_runner.run.assert_called_once_with(
+            [settings.stonithd_binary, "metadata"], ignore_stderr=True
+        )
+
+    def test_invalid_xml(self):
+        mock_runner = mock.MagicMock(spec_set=CommandRunner)
+        mock_runner.run.return_value = ("invalid XML", 0)
+        assert_raise_library_error(
+            lambda: lib_ra._get_pcmk_advanced_stonith_parameters(mock_runner),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_METADATA_FORMAT,
+                {},
+                True
+            )
+        )
+        mock_runner.run.assert_called_once_with(
+            [settings.stonithd_binary, "metadata"], ignore_stderr=True
+        )
+
+
+class GetActionTest(LibraryResourceTest):
+    def test_name_and_params(self):
+        xml = '''
+            <action name="required" param="value" another_param="same_value" />
+        '''
+        self.assertEqual(
+            lib_ra._get_action(etree.XML(xml)),
+            {
+                "name": "required",
+                "another_param": "same_value",
+                "param": "value"
+            }
+        )
+
+    def test_name_only(self):
+        xml = '''
+            <action name="required" />
+        '''
+        self.assertEqual(
+            lib_ra._get_action(etree.XML(xml)), {"name": "required"}
+        )
+
+    def test_empty(self):
+        xml = '<action />'
+        assert_raise_library_error(
+            lambda: lib_ra._get_action(etree.XML(xml)),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_METADATA_FORMAT,
+                {},
+                True
+            )
+        )
+
+    def test_no_name(self):
+        xml = '<action param="value" another_param="same_value" />'
+        assert_raise_library_error(
+            lambda: lib_ra._get_action(etree.XML(xml)),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_METADATA_FORMAT,
+                {},
+                True
+            )
+        )
+
+    def test_not_action_element(self):
+        xml = '<actions param="value" another_param="same_value" />'
+        assert_raise_library_error(
+            lambda: lib_ra._get_action(etree.XML(xml)),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_METADATA_FORMAT,
+                {},
+                True
+            )
+        )
+
+
+class GetAgentActionsTest(LibraryResourceTest):
+    def test_multiple_actions(self):
+        xml = """
+            <resource-agent>
+                <actions>
+                    <action name="on" automatic="0"/>
+                    <action name="off" />
+                    <action name="reboot" />
+                    <action name="status" />
+                </actions>
+            </resource-agent>
+        """
+        self.assertEqual(
+            lib_ra.get_agent_actions(etree.XML(xml)),
+            [
+                {
+                    "name": "on",
+                    "automatic": "0"
+                },
+                {"name": "off"},
+                {"name": "reboot"},
+                {"name": "status"}
+            ]
+        )
+
+    def test_root_is_not_resource_agent(self):
+        xml = """
+            <agent>
+                <actions>
+                    <action name="on" automatic="0"/>
+                    <action name="off" />
+                </actions>
+            </agent>
+        """
+        assert_raise_library_error(
+            lambda: lib_ra._get_action(etree.XML(xml)),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_METADATA_FORMAT,
+                {},
+                True
+            )
+        )
+
+    def test_action_without_name(self):
+        xml = """
+            <resource-agent>
+                <actions>
+                    <action name="on" automatic="0"/>
+                    <action />
+                    <action name="reboot" />
+                    <action name="status" />
+                </actions>
+            </resource-agent>
+        """
+        assert_raise_library_error(
+            lambda: lib_ra._get_action(etree.XML(xml)),
+            (
+                Severities.ERROR,
+                report_codes.INVALID_METADATA_FORMAT,
+                {},
+                True
+            )
+        )
+
+    def test_empty_actions(self):
+        xml = """
+            <resource-agent>
+                <actions />
+            </resource-agent>
+        """
+        self.assertEqual(len(lib_ra.get_agent_actions(etree.XML(xml))), 0)
+
+    def test_no_actions(self):
+        xml = "<resource-agent />"
+        self.assertEqual(len(lib_ra.get_agent_actions(etree.XML(xml))), 0)
+
+
+class ValidateResourceInstanceAttributesTest(LibraryResourceTest):
+    def setUp(self):
+        self.xml = etree.XML("<xml />")
+        self.params = [
+            {
+                "name": "test_param",
+                "longdesc": "Long description",
+                "shortdesc": "short description",
+                "type": "string",
+                "required": False,
+                "default": "default_value"
+            },
+            {
+                "name": "required_param",
+                "longdesc": "",
+                "shortdesc": "",
+                "type": "boolean",
+                "required": True,
+                "default": None
+            },
+            {
+                "name": "another parameter",
+                "longdesc": "",
+                "shortdesc": "",
+                "type": "string",
+                "required": True,
+                "default": None
+            }
+        ]
+
+    def test_only_required(self):
+        attrs = ["another parameter", "required_param"]
+        self.assertEqual(
+            lib_ra._validate_instance_attributes(self.params, attrs),
+            ([], [])
+        )
+
+    def test_optional(self):
+        attrs = ["another parameter", "required_param", "test_param"]
+        self.assertEqual(
+            lib_ra._validate_instance_attributes(self.params, attrs),
+            ([], [])
+        )
+
+    def test_bad_attrs(self):
+        attrs = ["another parameter", "required_param", "unknown_param"]
+        self.assertEqual(
+            lib_ra._validate_instance_attributes(self.params, attrs),
+            (["unknown_param"], [])
+        )
+
+    def test_bad_attrs_and_missing_required(self):
+        attrs = ["unknown_param", "test_param"]
+        bad, missing = lib_ra._validate_instance_attributes(self.params, attrs)
+        self.assertEqual(["unknown_param"], bad)
+        self.assertEqual(
+            sorted(["another parameter", "required_param"]),
+            sorted(missing)
+        )
+
+
+ at mock.patch("pcs.lib.resource_agent._validate_instance_attributes")
+ at mock.patch("pcs.lib.resource_agent.get_fence_agent_parameters")
+ at mock.patch("pcs.lib.resource_agent.get_fence_agent_metadata")
+ at mock.patch("pcs.lib.resource_agent.get_resource_agent_parameters")
+ at mock.patch("pcs.lib.resource_agent.get_resource_agent_metadata")
+class ValidateInstanceAttributesTest(LibraryResourceTest):
+    def setUp(self):
+        self.runner = mock.MagicMock(spec_set=CommandRunner)
+        self.valid_ret_val = (
+            ["test_parm", "another"], ["nothing here", "port"]
+        )
+        self.xml = etree.XML("<xml />")
+        self.instance_attrs = ["param", "another_one"]
+        self.attrs = [
+            {
+                "name": "test_param",
+                "longdesc": "Long description",
+                "shortdesc": "short description",
+                "type": "string",
+                "required": False,
+                "default": "default_value"
+            },
+            {
+                "name": "required_param",
+                "longdesc": "",
+                "shortdesc": "",
+                "type": "boolean",
+                "required": True,
+                "default": None
+            }
+        ]
+
+    def test_resource(
+        self, res_met_mock, res_par_mock, fen_met_mock, fen_par_mock, valid_mock
+    ):
+        agent = "ocf:pacemaker:Dummy"
+        res_met_mock.return_value = self.xml
+        res_par_mock.return_value = self.attrs
+        valid_mock.return_value = self.valid_ret_val
+        self.assertEqual(
+            self.valid_ret_val,
+            lib_ra.validate_instance_attributes(
+                self.runner, self.instance_attrs, agent
+            )
+        )
+        res_met_mock.assert_called_once_with(self.runner, agent)
+        res_par_mock.assert_called_once_with(self.xml)
+        valid_mock.assert_called_once_with(self.attrs, self.instance_attrs)
+        fen_met_mock.assert_not_called()
+        fen_par_mock.assert_not_called()
+
+    def test_fence(
+        self, res_met_mock, res_par_mock, fen_met_mock, fen_par_mock, valid_mock
+    ):
+        agent = "stonith:fence_test"
+        fen_met_mock.return_value = self.xml
+        fen_par_mock.return_value = self.attrs
+        valid_mock.return_value = self.valid_ret_val
+        self.assertEqual(
+            (["test_parm", "another"], ["nothing here"]),
+            lib_ra.validate_instance_attributes(
+                self.runner, self.instance_attrs, agent
+            )
+        )
+        fen_met_mock.assert_called_once_with(self.runner, "fence_test")
+        fen_par_mock.assert_called_once_with(self.runner, self.xml)
+        valid_mock.assert_called_once_with(self.attrs, self.instance_attrs)
+        res_met_mock.assert_not_called()
+        res_par_mock.assert_not_called()
diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py
index 7957016..023148c 100644
--- a/pcs/test/test_node.py
+++ b/pcs/test/test_node.py
@@ -1,21 +1,23 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
-import os,sys
 import shutil
 import unittest
-currentdir = os.path.dirname(os.path.abspath(__file__))
-parentdir = os.path.dirname(currentdir)
-sys.path.insert(0, parentdir)
-import utils
-from pcs_test_functions import pcs,ac
 
-empty_cib = os.path.join(currentdir, "empty-withnodes.xml")
-temp_cib = os.path.join(currentdir, "temp.xml")
+from pcs.test.tools.misc import (
+    ac,
+    get_test_resource as rc,
+)
+from pcs.test.tools.pcs_runner import pcs
 
-class ClusterTest(unittest.TestCase):
+empty_cib = rc("cib-empty-withnodes.xml")
+temp_cib = rc("temp-cib.xml")
+
+class NodeTest(unittest.TestCase):
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
 
@@ -138,6 +140,40 @@ Cluster Properties:
 """
         ac(expected_out, output)
 
+    def test_node_standby(self):
+        output, returnVal = pcs(temp_cib, "node standby rh7-1")
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+
+        # try to standby node which is already in standby mode
+        output, returnVal = pcs(temp_cib, "node standby rh7-1")
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+
+        output, returnVal = pcs(temp_cib, "node unstandby rh7-1")
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+
+        # try to unstandby node which is not in standby mode
+        output, returnVal = pcs(temp_cib, "node unstandby rh7-1")
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
+
+        output, returnVal = pcs(temp_cib, "node standby nonexistant-node")
+        self.assertEqual(
+            output,
+            "Error: node 'nonexistant-node' does not appear to exist in configuration\n"
+        )
+        self.assertEqual(returnVal, 1)
+
+        output, returnVal = pcs(temp_cib, "node unstandby nonexistant-node")
+        self.assertEqual(
+            output,
+            "Error: node 'nonexistant-node' does not appear to exist in configuration\n"
+        )
+        self.assertEqual(returnVal, 1)
+
+
     def test_node_utilization_set(self):
         output, returnVal = pcs(temp_cib, "node utilization rh7-1 test1=10")
         ac("", output)
@@ -216,6 +252,3 @@ Error: Value of utilization attribute must be integer: 'test=int'
 """
         ac(expected_out, output)
         self.assertEqual(1, returnVal)
-
-if __name__ == "__main__":
-    unittest.main()
diff --git a/pcs/test/test_properties.py b/pcs/test/test_properties.py
index fe2d436..6cdd2e5 100644
--- a/pcs/test/test_properties.py
+++ b/pcs/test/test_properties.py
@@ -1,25 +1,30 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
-import os,sys
 import shutil
 import unittest
-parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0,parentdir) 
-import utils
-from pcs_test_functions import pcs,ac
 
-empty_cib = "empty.xml"
-temp_cib = "temp.xml"
+from pcs.test.tools.misc import (
+    ac,
+    get_test_resource as rc,
+)
+from pcs.test.tools.pcs_runner import pcs
+
+from pcs import utils
+
+empty_cib = rc("cib-empty.xml")
+temp_cib = rc("temp-cib.xml")
 
 class PropertyTest(unittest.TestCase):
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
 
     def testEmpty(self):
-        output, returnVal = pcs(temp_cib, "property") 
+        output, returnVal = pcs(temp_cib, "property")
         assert returnVal == 0, 'Unable to list resources'
         assert output == "Cluster Properties:\n", [output]
 
@@ -324,7 +329,3 @@ class PropertyTest(unittest.TestCase):
  default-resource-stickiness: 0.1
 """
         )
-
-if __name__ == "__main__":
-    unittest.main()
-
diff --git a/pcs/test/test_quorum.py b/pcs/test/test_quorum.py
new file mode 100644
index 0000000..b85b880
--- /dev/null
+++ b/pcs/test/test_quorum.py
@@ -0,0 +1,388 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import shutil
+from unittest import TestCase
+
+from pcs.test.tools.assertions import AssertPcsMixin
+from pcs.test.tools.misc import (
+    get_test_resource as rc,
+)
+from pcs.test.tools.pcs_runner import PcsRunner
+
+
+coro_conf = rc("corosync.conf")
+coro_qdevice_conf = rc("corosync-3nodes-qdevice.conf")
+temp_conf = rc("corosync.conf.tmp")
+
+
+class TestBase(TestCase, AssertPcsMixin):
+    def setUp(self):
+        shutil.copy(coro_conf, temp_conf)
+        self.pcs_runner = PcsRunner(corosync_conf_file=temp_conf)
+
+    def fixture_conf_qdevice(self):
+        shutil.copy(coro_qdevice_conf, temp_conf)
+
+
+class QuorumConfigTest(TestBase):
+    def test_no_device(self):
+        self.assert_pcs_success(
+            "quorum config",
+            "Options:\n"
+        )
+
+    def test_with_device(self):
+        self.fixture_conf_qdevice()
+        self.assert_pcs_success(
+            "quorum config",
+            """\
+Options:
+Device:
+  Model: net
+    host: 127.0.0.1
+"""
+        )
+
+
+class QuorumUpdateTest(TestBase):
+    def test_no_options(self):
+        self.assert_pcs_fail(
+            "quorum update",
+            stdout_start="\nUsage: pcs quorum <command>\n    update "
+        )
+
+    def test_invalid_option(self):
+        self.assert_pcs_fail(
+            "quorum update nonsense=invalid",
+            "Error: invalid quorum option 'nonsense', allowed options are: "
+                + "auto_tie_breaker, last_man_standing, "
+                + "last_man_standing_window, wait_for_all\n"
+        )
+
+    def test_invalid_value(self):
+        self.assert_pcs_fail(
+            "quorum update wait_for_all=invalid",
+            "Error: 'invalid' is not a valid wait_for_all value, use 0, 1\n"
+        )
+
+    def test_success(self):
+        self.assert_pcs_success(
+            "quorum config",
+            """\
+Options:
+"""
+        )
+        self.assert_pcs_success(
+            "quorum update wait_for_all=1"
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            """\
+Options:
+  wait_for_all: 1
+"""
+        )
+
+
+class DeviceAddTest(TestBase):
+    def test_no_model_keyword(self):
+        self.assert_pcs_fail(
+            "quorum device add option=value host=127.0.0.1",
+            stdout_start="\nUsage: pcs quorum <command>\n    device add "
+        )
+
+        self.assert_pcs_fail(
+            "quorum device add option=value host=127.0.0.1 --force",
+            stdout_start="\nUsage: pcs quorum <command>\n    device add "
+        )
+
+    def test_no_model_value(self):
+        self.assert_pcs_fail(
+            "quorum device add option=value model host=127.0.0.1",
+            stdout_start="\nUsage: pcs quorum <command>\n    device add "
+        )
+        self.assert_pcs_fail(
+            "quorum device add option=value model host=127.0.0.1 --force",
+            stdout_start="\nUsage: pcs quorum <command>\n    device add "
+        )
+
+    def test_more_models(self):
+        self.assert_pcs_fail(
+            "quorum device add model net host=127.0.0.1 model disk",
+            stdout_start="\nUsage: pcs quorum <command>\n    device add "
+        )
+        self.assert_pcs_fail(
+            "quorum device add model net host=127.0.0.1 model disk --force",
+            stdout_start="\nUsage: pcs quorum <command>\n    device add "
+        )
+
+    def test_model_in_options(self):
+        self.assert_pcs_fail(
+            "quorum device add model=disk model net host=127.0.0.1",
+            "Error: Model cannot be specified in generic options\n"
+        )
+        self.assert_pcs_fail(
+            "quorum device add model=disk model net host=127.0.0.1 --force",
+            "Error: Model cannot be specified in generic options\n"
+        )
+
+    def test_device_already_set(self):
+        self.fixture_conf_qdevice()
+        self.assert_pcs_fail(
+            "quorum device add model net host=127.0.0.1",
+            "Error: quorum device is already defined\n"
+        )
+        self.assert_pcs_fail(
+            "quorum device add model net host=127.0.0.1 --force",
+            "Error: quorum device is already defined\n"
+        )
+
+    def test_success_model_only(self):
+        self.assert_pcs_success(
+            "quorum device add model net host=127.0.0.1"
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            """\
+Options:
+Device:
+  Model: net
+    host: 127.0.0.1
+"""
+        )
+
+    def test_success_all_options(self):
+        self.assert_pcs_success(
+            "quorum device add timeout=12345 model net host=127.0.0.1"
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            """\
+Options:
+Device:
+  timeout: 12345
+  Model: net
+    host: 127.0.0.1
+"""
+        )
+
+    def test_missing_required_options(self):
+        self.assert_pcs_fail(
+            "quorum device add model net",
+            "Error: required option 'host' is missing\n"
+        )
+        self.assert_pcs_fail(
+            "quorum device add model net --force",
+            "Error: required option 'host' is missing\n"
+        )
+
+    def test_bad_options(self):
+        self.assert_pcs_fail(
+            "quorum device add a=b timeout=-1 model net host=127.0.0.1 port=x c=d",
+            """\
+Error: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker, use --force to override
+Error: 'x' is not a valid port value, use 1-65535, use --force to override
+Error: invalid quorum device option 'a', allowed options are: sync_timeout, timeout, use --force to override
+Error: '-1' is not a valid timeout value, use positive integer, use --force to override
+"""
+        )
+
+        self.assert_pcs_success(
+            "quorum device add a=b timeout=-1 model net host=127.0.0.1 port=x c=d --force",
+            """\
+Warning: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker
+Warning: 'x' is not a valid port value, use 1-65535
+Warning: invalid quorum device option 'a', allowed options are: sync_timeout, timeout
+Warning: '-1' is not a valid timeout value, use positive integer
+"""
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            """\
+Options:
+Device:
+  a: b
+  timeout: -1
+  Model: net
+    c: d
+    host: 127.0.0.1
+    port: x
+"""
+        )
+
+    def test_bad_model(self):
+        self.assert_pcs_fail(
+            "quorum device add model invalid x=y",
+            "Error: 'invalid' is not a valid model value, use net, use --force to override\n"
+        )
+        self.assert_pcs_success(
+            "quorum device add model invalid x=y --force",
+            "Warning: 'invalid' is not a valid model value, use net\n"
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            """\
+Options:
+Device:
+  Model: invalid
+    x: y
+"""
+        )
+
+
+class DeviceRemoveTest(TestBase):
+    def test_no_device(self):
+        self.assert_pcs_fail(
+            "quorum device remove",
+            "Error: no quorum device is defined in this cluster\n"
+        )
+        self.assert_pcs_fail(
+            "quorum device remove --force",
+            "Error: no quorum device is defined in this cluster\n"
+        )
+
+    def test_success(self):
+        self.fixture_conf_qdevice()
+        self.assert_pcs_success(
+            "quorum device remove"
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            "Options:\n"
+        )
+
+    def test_bad_options(self):
+        self.assert_pcs_fail(
+            "quorum device remove net",
+            stdout_start="\nUsage: pcs quorum <command>\n    device remove\n"
+        )
+        self.assert_pcs_fail(
+            "quorum device remove net --force",
+            stdout_start="\nUsage: pcs quorum <command>\n    device remove\n"
+        )
+
+
+class DeviceUpdateTest(TestBase):
+    def test_no_device(self):
+        self.assert_pcs_fail(
+            "quorum device update option=new_value model host=127.0.0.2",
+            "Error: no quorum device is defined in this cluster\n"
+        )
+        self.assert_pcs_fail(
+            "quorum device update option=new_value model host=127.0.0.2 --force",
+            "Error: no quorum device is defined in this cluster\n"
+        )
+
+    def test_generic_options_change(self):
+        self.fixture_conf_qdevice()
+        self.assert_pcs_success("quorum device update timeout=12345")
+        self.assert_pcs_success(
+            "quorum config",
+            """\
+Options:
+Device:
+  timeout: 12345
+  Model: net
+    host: 127.0.0.1
+"""
+        )
+
+    def test_model_options_change(self):
+        self.fixture_conf_qdevice()
+        self.assert_pcs_success("quorum device update model host=127.0.0.2")
+        self.assert_pcs_success(
+            "quorum config",
+            """\
+Options:
+Device:
+  Model: net
+    host: 127.0.0.2
+"""
+        )
+
+    def test_both_options_change(self):
+        self.fixture_conf_qdevice()
+        self.assert_pcs_success(
+            "quorum device update timeout=12345 model host=127.0.0.2 port=1"
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            """\
+Options:
+Device:
+  timeout: 12345
+  Model: net
+    host: 127.0.0.2
+    port: 1
+"""
+        )
+
+    def test_more_models(self):
+        self.assert_pcs_fail(
+            "quorum device update model host=127.0.0.2 model port=1",
+            stdout_start="\nUsage: pcs quorum <command>\n    device update "
+        )
+        self.assert_pcs_fail(
+            "quorum device update model host=127.0.0.2 model port=1 --force",
+            stdout_start="\nUsage: pcs quorum <command>\n    device update "
+        )
+
+    def test_model_in_options(self):
+        self.assert_pcs_fail(
+            "quorum device update model=disk",
+            "Error: Model cannot be specified in generic options\n"
+        )
+        self.assert_pcs_fail(
+            "quorum device update model=disk --force",
+            "Error: Model cannot be specified in generic options\n"
+        )
+
+    def test_missing_required_options(self):
+        self.fixture_conf_qdevice()
+        self.assert_pcs_fail(
+            "quorum device update model host=",
+            "Error: required option 'host' is missing\n"
+        )
+        self.assert_pcs_fail(
+            "quorum device update model host= --force",
+            "Error: required option 'host' is missing\n"
+        )
+
+    def test_bad_options(self):
+        self.fixture_conf_qdevice()
+        self.assert_pcs_fail(
+            "quorum device update a=b timeout=-1 model port=x c=d",
+            """\
+Error: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker, use --force to override
+Error: 'x' is not a valid port value, use 1-65535, use --force to override
+Error: invalid quorum device option 'a', allowed options are: sync_timeout, timeout, use --force to override
+Error: '-1' is not a valid timeout value, use positive integer, use --force to override
+"""
+        )
+        self.assert_pcs_success(
+            "quorum device update a=b timeout=-1 model port=x c=d --force",
+            """\
+Warning: invalid quorum device model option 'c', allowed options are: algorithm, connect_timeout, force_ip_version, host, port, tie_breaker
+Warning: 'x' is not a valid port value, use 1-65535
+Warning: invalid quorum device option 'a', allowed options are: sync_timeout, timeout
+Warning: '-1' is not a valid timeout value, use positive integer
+"""
+        )
+        self.assert_pcs_success(
+            "quorum config",
+            """\
+Options:
+Device:
+  a: b
+  timeout: -1
+  Model: net
+    c: d
+    host: 127.0.0.1
+    port: x
+"""
+        )
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index 25e1167..3ccfe9b 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -1,33 +1,37 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import os
-import sys
 import shutil
 import re
 import unittest
-parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0, parentdir)
 
-import utils
-from pcs_test_functions import pcs, ac
-import resource
-from pcs_test_functions import PcsRunner
-from pcs_test_assertions import AssertPcsMixin
+from pcs.test.tools.assertions import AssertPcsMixin
+from pcs.test.tools.misc import (
+    ac,
+    get_test_resource as rc,
+)
+from pcs.test.tools.pcs_runner import (
+    pcs,
+    PcsRunner,
+)
 
+from pcs import utils
+from pcs import resource
 
-empty_cib = "empty.xml"
-temp_cib = "temp.xml"
-large_cib = "large.xml"
-temp_large_cib = "temp-large.xml"
+empty_cib = rc("cib-empty.xml")
+temp_cib = rc("temp-cib.xml")
+large_cib = rc("cib-large.xml")
+temp_large_cib = rc("temp-cib-large.xml")
 
 class ResourceTest(unittest.TestCase):
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
         shutil.copy(large_cib, temp_large_cib)
-        shutil.copy("corosync.conf.orig", "corosync.conf")
 
     # Setups up a cluster with Resources, groups, master/slave resource & clones
     def setupClusterA(self,temp_cib):
@@ -1058,6 +1062,7 @@ Location Constraints:
     Enabled on: rh7-1 (score:INFINITY)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
 
         o,r = pcs(temp_cib, "resource ungroup AGroup")
@@ -1534,6 +1539,7 @@ Fencing Levels:
 Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 
 Resources Defaults:
  No defaults set
@@ -1541,6 +1547,9 @@ Operations Defaults:
  No defaults set
 
 Cluster Properties:
+
+Quorum:
+  Options:
 """)
 
     def testCloneRemove(self):
@@ -1693,6 +1702,7 @@ Location Constraints:
     Enabled on: rh7-2 (score:INFINITY) (id:location-ClusterIP5-rh7-2-INFINITY)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 
 Resources Defaults:
  No defaults set
@@ -1700,6 +1710,9 @@ Operations Defaults:
  No defaults set
 
 Cluster Properties:
+
+Quorum:
+  Options:
 """)
 
         output, returnVal = pcs(temp_large_cib, "resource master dummylarge")
@@ -1860,8 +1873,8 @@ Deleting Resource (and group and M/S) - dummylarge
         assert returnVal == 0
 
         output, returnVal = pcs(temp_cib, "resource update clone-unmanage-clone meta is-managed=false")
-        assert returnVal == 0
-        ac (output, '')
+        ac(output, '')
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
         assert returnVal == 0
@@ -1940,48 +1953,62 @@ Deleting Resource (and group and M/S) - dummylarge
         ac (output, ' Master: master-unmanage-master\n  Resource: master-unmanage (class=ocf provider=pacemaker type=Stateful)\n   Operations: monitor interval=60s (master-unmanage-monitor-interval-60s)\n')
 
     def testGroupManage(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --group AG")
+        o, r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --group AG")
+        self.assertEqual(r, 0)
         ac(o,"")
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D2 Dummy --group AG")
+        o, r = pcs(temp_cib, "resource create --no-default-ops D2 Dummy --group AG")
+        self.assertEqual(r, 0)
         ac(o,"")
 
-        o,r = pcs(temp_cib, "resource --full")
+        o, r = pcs(temp_cib, "resource --full")
+        self.assertEqual(r, 0)
         ac(o," Group: AG\n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n  Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
 
-        o,r = pcs(temp_cib, "resource unmanage AG")
+        o, r = pcs(temp_cib, "resource unmanage AG")
+        self.assertEqual(r, 0)
         ac(o,"")
 
-        o,r = pcs(temp_cib, "resource --full")
+        o, r = pcs(temp_cib, "resource --full")
+        self.assertEqual(r, 0)
         ac(o," Group: AG\n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Meta Attrs: is-managed=false \n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n  Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n   Meta Attrs: is-managed=false \n   Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
 
-        o,r = pcs(temp_cib, "resource manage AG")
+        o, r = pcs(temp_cib, "resource manage AG")
+        self.assertEqual(r, 0)
         ac(o,"")
 
-        o,r = pcs(temp_cib, "resource --full")
+        o, r = pcs(temp_cib, "resource --full")
+        self.assertEqual(r, 0)
         ac(o," Group: AG\n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n  Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
 
-        o,r = pcs(temp_cib, "resource unmanage D2")
+        o, r = pcs(temp_cib, "resource unmanage D2")
+        self.assertEqual(r, 0)
         ac(o,"")
 
-        o,r = pcs(temp_cib, "resource --full")
+        o, r = pcs(temp_cib, "resource --full")
+        self.assertEqual(r, 0)
         ac(o," Group: AG\n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n  Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n   Meta Attrs: is-managed=false \n   Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
 
-        o,r = pcs(temp_cib, "resource manage AG")
+        o, r = pcs(temp_cib, "resource manage AG")
+        self.assertEqual(r, 0)
         ac(o,"")
 
-        o,r = pcs(temp_cib, "resource --full")
+        o, r = pcs(temp_cib, "resource --full")
+        self.assertEqual(r, 0)
         ac(o," Group: AG\n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n  Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
 
-        o,r = pcs(temp_cib, "resource unmanage D2")
+        o, r = pcs(temp_cib, "resource unmanage D2")
+        self.assertEqual(r, 0)
         ac(o,"")
 
-        o,r = pcs(temp_cib, "resource unmanage D1")
+        o, r = pcs(temp_cib, "resource unmanage D1")
+        self.assertEqual(r, 0)
         ac(o,"")
 
         os.system("CIB_file="+temp_cib+" crm_resource --resource AG --set-parameter is-managed --meta --parameter-value false --force > /dev/null")
 
-        o,r = pcs(temp_cib, "resource --full")
+        o, r = pcs(temp_cib, "resource --full")
+        self.assertEqual(r, 0)
         ac(o,"""\
  Group: AG
   Meta Attrs: is-managed=false 
@@ -1994,10 +2021,12 @@ Deleting Resource (and group and M/S) - dummylarge
 """)
 
 
-        o,r = pcs(temp_cib, "resource manage AG")
+        o, r = pcs(temp_cib, "resource manage AG")
+        self.assertEqual(r, 0)
         ac(o,"")
 
-        o,r = pcs(temp_cib, "resource --full")
+        o, r = pcs(temp_cib, "resource --full")
+        self.assertEqual(r, 0)
         ac(o," Group: AG\n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n  Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
 
     def testMasterMetaCreate(self):
@@ -2118,7 +2147,7 @@ Deleting Resource (and group and M/S) - dummylarge
         assert r == 0
 
         o,r = pcs(temp_cib, "constraint")
-        ac(o,"Location Constraints:\n  Resource: D0-clone\n    Enabled on: rh7-1 (score:INFINITY)\nOrdering Constraints:\nColocation Constraints:\n")
+        ac(o,"Location Constraints:\n  Resource: D0-clone\n    Enabled on: rh7-1 (score:INFINITY)\nOrdering Constraints:\nColocation Constraints:\nTicket Constraints:\n")
         assert r == 0
 
         o,r = pcs(temp_cib, "resource unclone D0-clone")
@@ -2610,11 +2639,11 @@ Deleting Resource (and group and M/S) - dummylarge
         assert output == "", [output]
 
         output, returnval = pcs(temp_cib, "resource update D2 blah=blah")
-        assert returnVal == 0
+        assert returnval == 0
         assert output == "", [output]
 
         output, returnval = pcs(temp_cib, "resource update D2")
-        assert returnVal == 0
+        assert returnval == 0
         assert output == "", [output]
 
     def testResourceMoveBanClear(self):
@@ -2649,6 +2678,7 @@ Location Constraints:
     Enabled on: rh7-1 (score:INFINITY) (role: Started) (id:cli-prefer-dummy)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -2661,6 +2691,7 @@ Colocation Constraints:
 Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
 
         output, returnVal = pcs(temp_cib, "resource ban dummy rh7-1")
@@ -2677,6 +2708,7 @@ Location Constraints:
     Disabled on: rh7-1 (score:-INFINITY) (role: Started) (id:cli-ban-dummy-on-rh7-1)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -2689,6 +2721,7 @@ Colocation Constraints:
 Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
 
         output, returnVal = pcs(
@@ -2703,11 +2736,12 @@ Colocation Constraints:
 Location Constraints:
   Resource: dummy
     Constraint: cli-prefer-dummy
-      Rule: score=INFINITY boolean-op=and  (id:cli-prefer-rule-dummy)
+      Rule: boolean-op=and score=INFINITY  (id:cli-prefer-rule-dummy)
         Expression: #uname eq string rh7-1  (id:cli-prefer-expr-dummy)
         Expression: date lt {datetime}  (id:cli-prefer-lifetime-end-dummy)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -2720,6 +2754,7 @@ Colocation Constraints:
 Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
 
         output, returnVal = pcs(
@@ -2737,11 +2772,12 @@ This will prevent dummy from running on rh7-1 until the constraint is removed. T
 Location Constraints:
   Resource: dummy
     Constraint: cli-ban-dummy-on-rh7-1
-      Rule: score=-INFINITY boolean-op=and  (id:cli-ban-dummy-on-rh7-1-rule)
+      Rule: boolean-op=and score=-INFINITY  (id:cli-ban-dummy-on-rh7-1-rule)
         Expression: #uname eq string rh7-1  (id:cli-ban-dummy-on-rh7-1-expr)
         Expression: date lt {datetime}  (id:cli-ban-dummy-on-rh7-1-lifetime)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -2818,6 +2854,7 @@ Location Constraints:
     Disabled on: rh7-1 (score:-INFINITY) (role: Started) (id:cli-ban-DG-clone-on-rh7-1)
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -2830,6 +2867,7 @@ Colocation Constraints:
 Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 """)
         self.assertEqual(0, returnVal)
 
@@ -2968,7 +3006,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
 
         o,r = pcs(temp_cib, "constraint")
         assert r == 0
-        ac(o,"Location Constraints:\n  Resource: DGroup\n    Enabled on: rh7-1 (score:INFINITY) (role: Started)\nOrdering Constraints:\nColocation Constraints:\n")
+        ac(o,"Location Constraints:\n  Resource: DGroup\n    Enabled on: rh7-1 (score:INFINITY) (role: Started)\nOrdering Constraints:\nColocation Constraints:\nTicket Constraints:\n")
 
         o,r = pcs(temp_cib, "resource delete D1")
         ac(o,"Deleting Resource - D1\n")
@@ -3016,8 +3054,13 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert returnVal == 0
 
         output,returnVal = pcs(temp_cib, "resource --full")
-        assert returnVal == 0
-        ac(output," Clone: dlm-clone\n  Meta Attrs: interleave=true clone-node-max=1 ordered=true \n  Resource: dlm (class=ocf provider=pacemaker type=controld)\n   Operations: monitor interval=10s (dlm-monitor-interval-10s)\n")
+        ac(output, """\
+ Clone: dlm-clone
+  Meta Attrs: clone-node-max=1 interleave=true ordered=true 
+  Resource: dlm (class=ocf provider=pacemaker type=controld)
+   Operations: monitor interval=10s (dlm-monitor-interval-10s)
+""")
+        self.assertEqual(0, returnVal)
 
         output, returnVal  = pcs(temp_cib, "resource delete dlm")
         assert returnVal == 0
@@ -3028,8 +3071,13 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert returnVal == 0
 
         output,returnVal = pcs(temp_cib, "resource --full")
-        assert returnVal == 0
-        assert output == " Clone: dlm-clone\n  Meta Attrs: interleave=true clone-node-max=1 ordered=true \n  Resource: dlm (class=ocf provider=pacemaker type=controld)\n   Operations: monitor interval=10s (dlm-monitor-interval-10s)\n", [output]
+        ac(output, """\
+ Clone: dlm-clone
+  Meta Attrs: clone-node-max=1 interleave=true ordered=true 
+  Resource: dlm (class=ocf provider=pacemaker type=controld)
+   Operations: monitor interval=10s (dlm-monitor-interval-10s)
+""")
+        self.assertEqual(0, returnVal)
 
         output, returnVal  = pcs(temp_cib, "resource delete dlm")
         assert returnVal == 0
@@ -3048,8 +3096,13 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert output == "Error: unable to create resource/fence device 'dlm-clone', 'dlm-clone' already exists on this system\n", [output]
 
         output,returnVal = pcs(temp_cib, "resource --full")
-        assert returnVal == 0
-        assert output == " Clone: dlm-clone\n  Meta Attrs: interleave=true clone-node-max=1 ordered=true \n  Resource: dlm (class=ocf provider=pacemaker type=controld)\n   Operations: monitor interval=10s (dlm-monitor-interval-10s)\n", [output]
+        ac(output, """\
+ Clone: dlm-clone
+  Meta Attrs: clone-node-max=1 interleave=true ordered=true 
+  Resource: dlm (class=ocf provider=pacemaker type=controld)
+   Operations: monitor interval=10s (dlm-monitor-interval-10s)
+""")
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_large_cib, "resource clone dummy1")
         ac(output, '')
@@ -3185,8 +3238,8 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         ac(o, ' Clone: D1-clone\n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n')
 
         o, r = pcs(temp_cib, 'resource update D1-clone foo=bar')
-        assert r == 0
         ac(o, "")
+        self.assertEqual(0, r)
 
         o, r  = pcs(temp_cib, "resource --full")
         assert r == 0
@@ -3858,15 +3911,6 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         ac(o,"Error: ClusterIP5 is already a master/slave resource\n")
         assert r == 1
 
-#    def testMasterLargeFile(self):
-#        o,r = pcs("largefile.xml","resource")
-#        ac(o,"")
-#        assert r == 0
-
-#        o,r = pcs("largefile.xml","resource master lxc-ms-master-4")
-#        ac(o,"")
-#        assert r == 0
-
     def groupMSAndClone(self):
         o,r = pcs("resource create --no-default-ops D1 Dummy --clone")
         ac(o,"")
@@ -3906,7 +3950,7 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         assert r == 0
 
     def testVirtualDomainResource(self):
-        o,r = pcs("resource describe VirtualDomain")
+        dummy_o,r = pcs("resource describe VirtualDomain")
         assert r == 0
 
     def testResourceMissingValues(self):
@@ -4373,34 +4417,34 @@ Resource Utilization:
         self.assertEqual(0, returnVal)
 
     def test_resource_utilization_set_invalid(self):
-            output, returnVal = pcs(temp_large_cib, "resource utilization dummy0")
-            expected_out = """\
+        output, returnVal = pcs(temp_large_cib, "resource utilization dummy0")
+        expected_out = """\
 Error: Unable to find a resource: dummy0
 """
-            ac(expected_out, output)
-            self.assertEqual(1, returnVal)
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
 
-            output, returnVal = pcs(
-                temp_large_cib, "resource utilization dummy0 test=10"
-            )
-            expected_out = """\
+        output, returnVal = pcs(
+            temp_large_cib, "resource utilization dummy0 test=10"
+        )
+        expected_out = """\
 Error: Unable to find a resource: dummy0
 """
-            ac(expected_out, output)
-            self.assertEqual(1, returnVal)
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
 
-            output, returnVal = pcs(
-                temp_large_cib, "resource utilization dummy1 test1=10 test=int"
-            )
-            expected_out = """\
+        output, returnVal = pcs(
+            temp_large_cib, "resource utilization dummy1 test1=10 test=int"
+        )
+        expected_out = """\
 Error: Value of utilization attribute must be integer: 'test=int'
 """
-            ac(expected_out, output)
-            self.assertEqual(1, returnVal)
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
 
 class ResourcesReferencedFromAclTest(unittest.TestCase, AssertPcsMixin):
     def setUp(self):
-        shutil.copy('empty-1.2.xml', temp_cib)
+        shutil.copy(rc('cib-empty-1.2.xml'), temp_cib)
         self.pcs_runner = PcsRunner(temp_cib)
 
     def test_remove_referenced_primitive_resource(self):
@@ -4434,7 +4478,94 @@ class ResourcesReferencedFromAclTest(unittest.TestCase, AssertPcsMixin):
             'Deleting Resource (and group) - dummy2',
         ])
 
+class CloneMasterUpdate(unittest.TestCase, AssertPcsMixin):
+    def setUp(self):
+        shutil.copy(empty_cib, temp_cib)
+        self.pcs_runner = PcsRunner(temp_cib)
+
+    def test_no_op_allowed_in_clone_update(self):
+        self.assert_pcs_success("resource create dummy Dummy --clone")
+        self.assert_pcs_success(
+            "resource show dummy-clone",
+            """\
+ Clone: dummy-clone
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: start interval=0s timeout=20 (dummy-start-interval-0s)
+               stop interval=0s timeout=20 (dummy-stop-interval-0s)
+               monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+"""
+        )
+        self.assert_pcs_fail(
+            "resource update dummy-clone op stop timeout=300",
+            "Error: op settings must be changed on base resource, not the clone\n"
+        )
+        self.assert_pcs_fail(
+            "resource update dummy-clone foo=bar op stop timeout=300",
+            "Error: op settings must be changed on base resource, not the clone\n"
+        )
+        self.assert_pcs_success(
+            "resource show dummy-clone",
+            """\
+ Clone: dummy-clone
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: start interval=0s timeout=20 (dummy-start-interval-0s)
+               stop interval=0s timeout=20 (dummy-stop-interval-0s)
+               monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+"""
+        )
+
+    def test_no_op_allowed_in_master_update(self):
+        self.assert_pcs_success("resource create dummy Dummy --master")
+        self.assert_pcs_success(
+            "resource show dummy-master",
+            """\
+ Master: dummy-master
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: start interval=0s timeout=20 (dummy-start-interval-0s)
+               stop interval=0s timeout=20 (dummy-stop-interval-0s)
+               monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+"""
+        )
+        self.assert_pcs_fail(
+            "resource update dummy-master op stop timeout=300",
+            "Error: op settings must be changed on base resource, not the master\n"
+        )
+        self.assert_pcs_fail(
+            "resource update dummy-master foo=bar op stop timeout=300",
+            "Error: op settings must be changed on base resource, not the master\n"
+        )
+        self.assert_pcs_success(
+            "resource show dummy-master",
+            """\
+ Master: dummy-master
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: start interval=0s timeout=20 (dummy-start-interval-0s)
+               stop interval=0s timeout=20 (dummy-stop-interval-0s)
+               monitor interval=10 timeout=20 (dummy-monitor-interval-10)
+"""
+        )
 
-if __name__ == "__main__":
-    unittest.main()
+class ResourceRemoveWithTicketTest(unittest.TestCase, AssertPcsMixin):
+    def setUp(self):
+        shutil.copy(rc('cib-empty-1.2.xml'), temp_cib)
+        self.pcs_runner = PcsRunner(temp_cib)
 
+    def test_remove_ticket(self):
+        self.assert_pcs_success('resource create A Dummy')
+        self.assert_pcs_success(
+            'constraint ticket add T master A loss-policy=fence'
+        )
+        self.assert_pcs_success(
+            'constraint ticket show',
+            [
+                "Ticket Constraints:",
+                "  Master A loss-policy=fence ticket=T",
+            ]
+        )
+        self.assert_pcs_success(
+            "resource delete A",
+            [
+                "Removing Constraint - ticket-T-A-Master",
+                "Deleting Resource - A",
+            ]
+        )
diff --git a/pcs/test/test_rule.py b/pcs/test/test_rule.py
index 17450e5..8cf717a 100644
--- a/pcs/test/test_rule.py
+++ b/pcs/test/test_rule.py
@@ -1,22 +1,23 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
-import os.path
-import sys
 import shutil
 import unittest
 import xml.dom.minidom
-parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0, parentdir)
 
-from pcs_test_functions import pcs, ac
-import rule
+from pcs import rule
+from pcs.test.tools.misc import (
+    ac,
+    get_test_resource as rc,
+)
+from pcs.test.tools.pcs_runner import pcs
 
-
-empty_cib = "empty.xml"
-temp_cib = "temp.xml"
+empty_cib = rc("cib-empty.xml")
+temp_cib = rc("temp-cib.xml")
 
 class DateValueTest(unittest.TestCase):
 
@@ -1242,7 +1243,7 @@ class CibBuilderTest(unittest.TestCase):
         )
 
     def assertExpressionXml(self, rule_expression, rule_xml):
-        cib_dom = xml.dom.minidom.parse("empty.xml")
+        cib_dom = xml.dom.minidom.parse(empty_cib)
         constraints = cib_dom.getElementsByTagName("constraints")[0]
         constraint_el = constraints.appendChild(
             cib_dom.createElement("rsc_location")
@@ -1779,14 +1780,14 @@ Location Constraints:
       Rule: score=INFINITY  (id:location-dummy1-rule)
         Expression: #uname eq node1  (id:location-dummy1-rule-expr)
     Constraint: location-dummy1-1
-      Rule: score=100 role=master  (id:MyRule)
+      Rule: role=master score=100  (id:MyRule)
         Expression: #uname eq node2  (id:MyRule-expr)
     Constraint: location-dummy1-2
-      Rule: score=INFINITY boolean-op=or  (id:complexRule)
-        Rule: score=0 boolean-op=and  (id:complexRule-rule)
+      Rule: boolean-op=or score=INFINITY  (id:complexRule)
+        Rule: boolean-op=and score=0  (id:complexRule-rule)
           Expression: #uname eq node3  (id:complexRule-rule-expr)
           Expression: foo gt version 1.2  (id:complexRule-rule-expr-1)
-        Rule: score=0 boolean-op=and  (id:complexRule-rule-1)
+        Rule: boolean-op=and score=0  (id:complexRule-rule-1)
           Expression:  (id:complexRule-rule-1-expr)
             Date Spec: hours=12-23 weekdays=1-5  (id:complexRule-rule-1-expr-datespec)
           Expression: date in_range 2014-07-26 to duration  (id:complexRule-rule-1-expr-1)
@@ -1802,14 +1803,14 @@ Location Constraints:
       Rule: score=INFINITY
         Expression: #uname eq node1
     Constraint: location-dummy1-1
-      Rule: score=100 role=master
+      Rule: role=master score=100
         Expression: #uname eq node2
     Constraint: location-dummy1-2
-      Rule: score=INFINITY boolean-op=or
-        Rule: score=0 boolean-op=and
+      Rule: boolean-op=or score=INFINITY
+        Rule: boolean-op=and score=0
           Expression: #uname eq node3
           Expression: foo gt version 1.2
-        Rule: score=0 boolean-op=and
+        Rule: boolean-op=and score=0
           Expression:
             Date Spec: hours=12-23 weekdays=1-5
           Expression: date in_range 2014-07-26 to duration
@@ -1927,7 +1928,7 @@ Location Constraints:
         self.assertEqual(1, returnVal)
 
     def assertExpressionXml(self, rule_expression, rule_xml):
-        cib_dom = xml.dom.minidom.parse("empty.xml")
+        cib_dom = xml.dom.minidom.parse(empty_cib)
         constraints = cib_dom.getElementsByTagName("constraints")[0]
         constraint_el = constraints.appendChild(
             cib_dom.createElement("rsc_location")
@@ -1939,8 +1940,3 @@ Location Constraints:
             constraint_el.toprettyxml(indent="    "),
             rule_xml.lstrip().rstrip(" ")
         )
-
-
-if __name__ == "__main__":
-    unittest.main()
-
diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
index 7382edc..1257399 100644
--- a/pcs/test/test_stonith.py
+++ b/pcs/test/test_stonith.py
@@ -1,21 +1,23 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
-import os
-import sys
 import shutil
 import unittest
-parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0, parentdir)
 
-import utils
-from pcs_test_functions import pcs, ac
+from pcs.test.tools.misc import (
+    ac,
+    get_test_resource as rc,
+)
+from pcs.test.tools.pcs_runner import pcs
 
+from pcs import utils
 
-empty_cib = "empty.xml"
-temp_cib = "temp.xml"
+empty_cib = rc("cib-empty.xml")
+temp_cib = rc("temp-cib.xml")
 
 class StonithTest(unittest.TestCase):
     def setUp(self):
@@ -24,11 +26,11 @@ class StonithTest(unittest.TestCase):
     def testStonithCreation(self):
         output, returnVal = pcs(temp_cib, "stonith create test1 fence_noxist")
         assert returnVal == 1
-        assert output == "Error: Unable to create resource 'stonith:fence_noxist', it is not installed on this system (use --force to override)\n"
+        assert output == "Error: fence agent 'fence_noxist' not found, use --force to override\n"
 
         output, returnVal = pcs(temp_cib, "stonith create test1 fence_noxist --force")
-        assert returnVal == 0
-        assert output == ""
+        ac(output, "Warning: fence agent 'fence_noxist' not found\n")
+        self.assertEqual(returnVal, 0)
 
         output, returnVal = pcs(temp_cib, "stonith create test2 fence_apc")
         assert returnVal == 1
@@ -72,7 +74,7 @@ class StonithTest(unittest.TestCase):
 
 # Testing that pcmk_host_check, pcmk_host_list & pcmk_host_map are allowed for
 # stonith agents
-        output, returnVal = pcs(temp_cib, 'stonith create apc-fencing fence_apc params ipaddr="morph-apc" login="apc" passwd="apc" switch="1" pcmk_host_map="buzz-01:1;buzz-02:2;buzz-03:3;buzz-04:4;buzz-05:5" action="reboot" debug="1" pcmk_host_check="static-list" pcmk_host_list="buzz-01,buzz-02,buzz-03,buzz-04,buzz-05"')
+        output, returnVal = pcs(temp_cib, 'stonith create apc-fencing fence_apc params ipaddr="morph-apc" login="apc" passwd="apc" switch="1" pcmk_host_map="buzz-01:1;buzz-02:2;buzz-03:3;buzz-04:4;buzz-05:5" pcmk_host_check="static-list" pcmk_host_list="buzz-01,buzz-02,buzz-03,buzz-04,buzz-05"')
         assert returnVal == 0
         ac(output,"")
 
@@ -83,7 +85,7 @@ class StonithTest(unittest.TestCase):
         output, returnVal = pcs(temp_cib, 'stonith show apc-fencing')
         ac(output, """\
  Resource: apc-fencing (class=stonith type=fence_apc)
-  Attributes: ipaddr="morph-apc" login="apc" passwd="apc" switch="1" pcmk_host_map="buzz-01:1;buzz-02:2;buzz-03:3;buzz-04:4;buzz-05:5" action="reboot" debug="1" pcmk_host_check="static-list" pcmk_host_list="buzz-01,buzz-02,buzz-03,buzz-04,buzz-05"
+  Attributes: ipaddr="morph-apc" login="apc" passwd="apc" switch="1" pcmk_host_map="buzz-01:1;buzz-02:2;buzz-03:3;buzz-04:4;buzz-05:5" pcmk_host_check="static-list" pcmk_host_list="buzz-01,buzz-02,buzz-03,buzz-04,buzz-05"
   Operations: monitor interval=60s (apc-fencing-monitor-interval-60s)
 """)
         assert returnVal == 0
@@ -145,6 +147,7 @@ Fencing Levels:
 Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
+Ticket Constraints:
 
 Resources Defaults:
  No defaults set
@@ -152,6 +155,9 @@ Operations Defaults:
  No defaults set
 
 Cluster Properties:
+
+Quorum:
+  Options:
 """)
         assert returnVal == 0
 
@@ -285,7 +291,7 @@ Cluster Properties:
         # metadata from pacemaker, this will be reviewed and fixed.
         output, returnVal = pcs(
             temp_cib,
-            'stonith create apc-1 fence_apc params ipaddr="ip" login="apc" action="reboot"'
+            'stonith create apc-1 fence_apc params ipaddr="ip" login="apc"'
         )
 #        ac(output, """\
 #Error: missing required option(s): 'port' for resource type: stonith:fence_apc (use --force to override)
@@ -296,21 +302,21 @@ Cluster Properties:
 
         output, returnVal = pcs(
             temp_cib,
-            'stonith create apc-2 fence_apc params ipaddr="ip" login="apc" action="reboot" pcmk_host_map="buzz-01:1;buzz-02:2"'
+            'stonith create apc-2 fence_apc params ipaddr="ip" login="apc" pcmk_host_map="buzz-01:1;buzz-02:2"'
         )
         ac(output, "")
         self.assertEqual(returnVal, 0)
 
         output, returnVal = pcs(
             temp_cib,
-            'stonith create apc-3 fence_apc params ipaddr="ip" login="apc" action="reboot" pcmk_host_list="buzz-01,buzz-02"'
+            'stonith create apc-3 fence_apc params ipaddr="ip" login="apc" pcmk_host_list="buzz-01,buzz-02"'
         )
         ac(output, "")
         self.assertEqual(returnVal, 0)
 
         output, returnVal = pcs(
             temp_cib,
-            'stonith create apc-4 fence_apc params ipaddr="ip" login="apc" action="reboot" pcmk_host_argument="buzz-01"'
+            'stonith create apc-4 fence_apc params ipaddr="ip" login="apc" pcmk_host_argument="buzz-01"'
         )
         ac(output, "")
         self.assertEqual(returnVal, 0)
@@ -324,23 +330,23 @@ Cluster Properties:
         assert returnVal == 0
         assert output == ""
 
-        output, returnVal = pcs(temp_cib, "stonith create F1 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn action=reboot")
+        output, returnVal = pcs(temp_cib, "stonith create F1 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn")
         assert returnVal == 0
         ac(output,"")
 
-        output, returnVal = pcs(temp_cib, "stonith create F2 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn action=reboot")
+        output, returnVal = pcs(temp_cib, "stonith create F2 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn")
         assert returnVal == 0
         ac(output,"")
 
-        output, returnVal = pcs(temp_cib, "stonith create F3 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn action=reboot")
+        output, returnVal = pcs(temp_cib, "stonith create F3 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn")
         assert returnVal == 0
         ac(output,"")
 
-        output, returnVal = pcs(temp_cib, "stonith create F4 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn action=reboot")
+        output, returnVal = pcs(temp_cib, "stonith create F4 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn")
         assert returnVal == 0
         ac(output,"")
 
-        output, returnVal = pcs(temp_cib, "stonith create F5 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn action=reboot")
+        output, returnVal = pcs(temp_cib, "stonith create F5 fence_apc 'pcmk_host_list=nodea nodeb' ipaddr=ip login=lgn")
         assert returnVal == 0
         ac(output,"")
 
@@ -415,7 +421,7 @@ Cluster Properties:
         output, returnVal = pcs(temp_cib, "stonith level")
         assert returnVal == 0
         assert output == ' Node: rh7-1\n  Level 1 - F3,F4\n  Level 2 - F5,F2\n Node: rh7-2\n  Level 2 - F2\n',[output]
-        
+
         output, returnVal = pcs(temp_cib, "stonith level clear rh7-1a")
         assert returnVal == 0
         output = ""
@@ -423,7 +429,7 @@ Cluster Properties:
         output, returnVal = pcs(temp_cib, "stonith level")
         assert returnVal == 0
         assert output == ' Node: rh7-1\n  Level 1 - F3,F4\n  Level 2 - F5,F2\n Node: rh7-2\n  Level 2 - F2\n',[output]
-        
+
         output, returnVal = pcs(temp_cib, "stonith level clear rh7-1")
         assert returnVal == 0
         output = ""
@@ -431,7 +437,7 @@ Cluster Properties:
         output, returnVal = pcs(temp_cib, "stonith level")
         assert returnVal == 0
         assert output == ' Node: rh7-2\n  Level 2 - F2\n',[output]
-        
+
         output, returnVal = pcs(temp_cib, "stonith level add 2 rh7-1 F5,F2")
         assert returnVal == 0
         assert output == ""
@@ -443,7 +449,7 @@ Cluster Properties:
         output, returnVal = pcs(temp_cib, "stonith level")
         assert returnVal == 0
         assert output == ' Node: rh7-1\n  Level 1 - F3,F4\n  Level 2 - F5,F2\n Node: rh7-2\n  Level 2 - F2\n',[output]
-        
+
         output, returnVal = pcs(temp_cib, "stonith level clear")
         assert returnVal == 0
         assert output == ""
@@ -505,7 +511,7 @@ Cluster Properties:
         o,r = pcs(temp_cib, "stonith level remove 5 rh7-1")
         assert r == 0
         assert o == ""
-        
+
         o,r = pcs(temp_cib, "stonith level remove 4 rh7-1 F2")
         assert r == 1
         assert o == "Error: unable to remove fencing level, fencing level for node: rh7-1, at level: 4, with device: F2 doesn't exist\n"
@@ -788,7 +794,7 @@ Cluster Properties:
         o,r = pcs(temp_cib, "status")
         assert "WARNING: no stonith devices and " in o
 
-        o,r = pcs(temp_cib, "stonith create test_stonith fence_apc ipaddr=ip login=lgn, action=reboot, pcmk_host_argument=node1")
+        o,r = pcs(temp_cib, "stonith create test_stonith fence_apc ipaddr=ip login=lgn,  pcmk_host_argument=node1")
         ac(o,"")
         assert r == 0
 
@@ -799,13 +805,9 @@ Cluster Properties:
         ac(o,"Deleting Resource - test_stonith\n")
         assert r == 0
 
-        o,r = pcs(temp_cib, "stonith create test_stonith fence_apc ipaddr=ip login=lgn, action=reboot, pcmk_host_argument=node1 --clone")
+        o,r = pcs(temp_cib, "stonith create test_stonith fence_apc ipaddr=ip login=lgn,  pcmk_host_argument=node1 --clone")
         ac(o,"")
         assert r == 0
 
         o,r = pcs(temp_cib, "status")
         assert "WARNING: no stonith devices and " not in o
-
-if __name__ == "__main__":
-    unittest.main()
-
diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
index 6a581d9..c61a2b8 100644
--- a/pcs/test/test_utils.py
+++ b/pcs/test/test_utils.py
@@ -1,25 +1,30 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
-import os.path
 import sys
-import shutil
 import unittest
 import xml.dom.minidom
 import xml.etree.cElementTree as ET
-currentdir = os.path.dirname(os.path.abspath(__file__))
-parentdir = os.path.dirname(currentdir)
-sys.path.insert(0, parentdir)
+from time import sleep
 
-import utils
-from pcs_test_functions import pcs, ac, get_child_elements
+try:
+    from cStringIO import StringIO
+except ImportError:
+    #python 3
+    from io import StringIO
 
+from pcs.test.tools.xml import dom_get_child_elements
+from pcs.test.tools.misc import get_test_resource as rc
 
-cib_with_nodes =  os.path.join(currentdir, "empty-withnodes.xml")
-empty_cib = os.path.join(currentdir, "empty.xml")
-temp_cib = os.path.join(currentdir, "temp.xml")
+from pcs import utils
+
+cib_with_nodes = rc("cib-empty-withnodes.xml")
+empty_cib = rc("cib-empty.xml")
+temp_cib = rc("temp-cib.xml")
 
 unittest.TestCase.maxDiff = None
 
@@ -662,27 +667,6 @@ class UtilsTest(unittest.TestCase):
         self.assertFalse(utils.is_score("10INFINITY"))
         self.assertFalse(utils.is_score("+10+INFINITY"))
 
-    def test_get_timeout_seconds(self):
-        self.assertEqual(utils.get_timeout_seconds("10"), 10)
-        self.assertEqual(utils.get_timeout_seconds("10s"), 10)
-        self.assertEqual(utils.get_timeout_seconds("10sec"), 10)
-        self.assertEqual(utils.get_timeout_seconds("10m"), 600)
-        self.assertEqual(utils.get_timeout_seconds("10min"), 600)
-        self.assertEqual(utils.get_timeout_seconds("10h"), 36000)
-        self.assertEqual(utils.get_timeout_seconds("10hr"), 36000)
-
-        self.assertEqual(utils.get_timeout_seconds("1a1s"), None)
-        self.assertEqual(utils.get_timeout_seconds("10mm"), None)
-        self.assertEqual(utils.get_timeout_seconds("10mim"), None)
-        self.assertEqual(utils.get_timeout_seconds("aaa"), None)
-        self.assertEqual(utils.get_timeout_seconds(""), None)
-
-        self.assertEqual(utils.get_timeout_seconds("1a1s", True), "1a1s")
-        self.assertEqual(utils.get_timeout_seconds("10mm", True), "10mm")
-        self.assertEqual(utils.get_timeout_seconds("10mim", True), "10mim")
-        self.assertEqual(utils.get_timeout_seconds("aaa", True), "aaa")
-        self.assertEqual(utils.get_timeout_seconds("", True), "")
-
     def get_cib_status_lrm(self):
         cib_dom = self.get_cib_empty()
         new_status = xml.dom.minidom.parseString("""
@@ -1448,7 +1432,7 @@ Membership information
         )
 
     def test_get_operations_from_transitions(self):
-        transitions = utils.parse(os.path.join(currentdir, "transitions01.xml"))
+        transitions = utils.parse(rc("transitions01.xml"))
         self.assertEqual(
             [
                 {
@@ -1491,7 +1475,7 @@ Membership information
             utils.get_operations_from_transitions(transitions)
         )
 
-        transitions = utils.parse(os.path.join(currentdir, "transitions02.xml"))
+        transitions = utils.parse(rc("transitions02.xml"))
         self.assertEqual(
             [
                 {
@@ -1782,34 +1766,35 @@ Membership information
     def test_dom_prepare_child_element(self):
         cib = self.get_cib_with_nodes_minidom()
         node = cib.getElementsByTagName("node")[0]
-        self.assertEqual(len(get_child_elements(node)), 0)
+        self.assertEqual(len(dom_get_child_elements(node)), 0)
         child = utils.dom_prepare_child_element(
             node, "utilization", "rh7-1-utilization"
         )
-        self.assertEqual(len(get_child_elements(node)), 1)
-        self.assertEqual(child, get_child_elements(node)[0])
-        self.assertEqual(get_child_elements(node)[0].tagName, "utilization")
+        self.assertEqual(len(dom_get_child_elements(node)), 1)
+        self.assertEqual(child, dom_get_child_elements(node)[0])
+        self.assertEqual(dom_get_child_elements(node)[0].tagName, "utilization")
         self.assertEqual(
-            get_child_elements(node)[0].getAttribute("id"), "rh7-1-utilization"
+            dom_get_child_elements(node)[0].getAttribute("id"),
+            "rh7-1-utilization"
         )
         child2 = utils.dom_prepare_child_element(
             node, "utilization", "rh7-1-utilization"
         )
-        self.assertEqual(len(get_child_elements(node)), 1)
+        self.assertEqual(len(dom_get_child_elements(node)), 1)
         self.assertEqual(child, child2)
 
     def test_dom_update_nv_pair_add(self):
         nv_set = xml.dom.minidom.parseString("<nvset/>").documentElement
         utils.dom_update_nv_pair(nv_set, "test_name", "test_val", "prefix-")
-        self.assertEqual(len(get_child_elements(nv_set)), 1)
-        pair = get_child_elements(nv_set)[0]
+        self.assertEqual(len(dom_get_child_elements(nv_set)), 1)
+        pair = dom_get_child_elements(nv_set)[0]
         self.assertEqual(pair.getAttribute("name"), "test_name")
         self.assertEqual(pair.getAttribute("value"), "test_val")
         self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
         utils.dom_update_nv_pair(nv_set, "another_name", "value", "prefix2-")
-        self.assertEqual(len(get_child_elements(nv_set)), 2)
-        self.assertEqual(pair, get_child_elements(nv_set)[0])
-        pair = get_child_elements(nv_set)[1]
+        self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
+        self.assertEqual(pair, dom_get_child_elements(nv_set)[0])
+        pair = dom_get_child_elements(nv_set)[1]
         self.assertEqual(pair.getAttribute("name"), "another_name")
         self.assertEqual(pair.getAttribute("value"), "value")
         self.assertEqual(pair.getAttribute("id"), "prefix2-another_name")
@@ -1822,9 +1807,9 @@ Membership information
         </nv_set>
         """).documentElement
         utils.dom_update_nv_pair(nv_set, "test_name", "new_value")
-        self.assertEqual(len(get_child_elements(nv_set)), 2)
-        pair1 = get_child_elements(nv_set)[0]
-        pair2 = get_child_elements(nv_set)[1]
+        self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
+        pair1 = dom_get_child_elements(nv_set)[0]
+        pair2 = dom_get_child_elements(nv_set)[1]
         self.assertEqual(pair1.getAttribute("name"), "test_name")
         self.assertEqual(pair1.getAttribute("value"), "new_value")
         self.assertEqual(pair1.getAttribute("id"), "prefix-test_name")
@@ -1840,15 +1825,15 @@ Membership information
         </nv_set>
         """).documentElement
         utils.dom_update_nv_pair(nv_set, "non_existing_name", "")
-        self.assertEqual(len(get_child_elements(nv_set)), 2)
+        self.assertEqual(len(dom_get_child_elements(nv_set)), 2)
         utils.dom_update_nv_pair(nv_set, "another_name", "")
-        self.assertEqual(len(get_child_elements(nv_set)), 1)
-        pair = get_child_elements(nv_set)[0]
+        self.assertEqual(len(dom_get_child_elements(nv_set)), 1)
+        pair = dom_get_child_elements(nv_set)[0]
         self.assertEqual(pair.getAttribute("name"), "test_name")
         self.assertEqual(pair.getAttribute("value"), "test_val")
         self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
         utils.dom_update_nv_pair(nv_set, "test_name", "")
-        self.assertEqual(len(get_child_elements(nv_set)), 0)
+        self.assertEqual(len(dom_get_child_elements(nv_set)), 0)
 
     def test_convert_args_to_tuples(self):
         out = utils.convert_args_to_tuples(
@@ -1860,6 +1845,11 @@ Membership information
         )
 
     def test_dom_update_utilization_invalid(self):
+        #commands writes to stderr
+        #we want clean test output, so we capture it
+        tmp_stderr = sys.stderr
+        sys.stderr = StringIO()
+
         el = xml.dom.minidom.parseString("""
         <resource id="test_id"/>
         """).documentElement
@@ -1873,6 +1863,8 @@ Membership information
             utils.dom_update_utilization, el, [("name", "0.01")]
         )
 
+        sys.stderr = tmp_stderr
+
     def test_dom_update_utilization_add(self):
         el = xml.dom.minidom.parseString("""
         <resource id="test_id"/>
@@ -1881,22 +1873,36 @@ Membership information
             el, [("name", ""), ("key", "-1"), ("keys", "90")]
         )
 
-        self.assertEqual(len(get_child_elements(el)), 1)
-        u = get_child_elements(el)[0]
+        self.assertEqual(len(dom_get_child_elements(el)), 1)
+        u = dom_get_child_elements(el)[0]
         self.assertEqual(u.tagName, "utilization")
         self.assertEqual(u.getAttribute("id"), "test_id-utilization")
-        self.assertEqual(len(get_child_elements(u)), 2)
+        self.assertEqual(len(dom_get_child_elements(u)), 2)
 
         self.assertEqual(
-            get_child_elements(u)[0].getAttribute("id"), "test_id-utilization-key"
+            dom_get_child_elements(u)[0].getAttribute("id"),
+            "test_id-utilization-key"
+        )
+        self.assertEqual(
+            dom_get_child_elements(u)[0].getAttribute("name"),
+            "key"
+        )
+        self.assertEqual(
+            dom_get_child_elements(u)[0].getAttribute("value"),
+            "-1"
         )
-        self.assertEqual(get_child_elements(u)[0].getAttribute("name"), "key")
-        self.assertEqual(get_child_elements(u)[0].getAttribute("value"), "-1")
         self.assertEqual(
-            get_child_elements(u)[1].getAttribute("id"), "test_id-utilization-keys"
+            dom_get_child_elements(u)[1].getAttribute("id"),
+            "test_id-utilization-keys"
+        )
+        self.assertEqual(
+            dom_get_child_elements(u)[1].getAttribute("name"),
+            "keys"
+        )
+        self.assertEqual(
+            dom_get_child_elements(u)[1].getAttribute("value"),
+            "90"
         )
-        self.assertEqual(get_child_elements(u)[1].getAttribute("name"), "keys")
-        self.assertEqual(get_child_elements(u)[1].getAttribute("value"), "90")
 
     def test_dom_update_utilization_update_remove(self):
         el = xml.dom.minidom.parseString("""
@@ -1911,13 +1917,20 @@ Membership information
             el, [("key", "100"), ("keys", "")]
         )
 
-        u = get_child_elements(el)[0]
-        self.assertEqual(len(get_child_elements(u)), 1)
+        u = dom_get_child_elements(el)[0]
+        self.assertEqual(len(dom_get_child_elements(u)), 1)
         self.assertEqual(
-            get_child_elements(u)[0].getAttribute("id"), "test_id-utilization-key"
+            dom_get_child_elements(u)[0].getAttribute("id"),
+            "test_id-utilization-key"
+        )
+        self.assertEqual(
+            dom_get_child_elements(u)[0].getAttribute("name"),
+            "key"
+        )
+        self.assertEqual(
+            dom_get_child_elements(u)[0].getAttribute("value"),
+            "100"
         )
-        self.assertEqual(get_child_elements(u)[0].getAttribute("name"), "key")
-        self.assertEqual(get_child_elements(u)[0].getAttribute("value"), "100")
 
     def test_dom_update_meta_attr_add(self):
         el = xml.dom.minidom.parseString("""
@@ -1927,22 +1940,36 @@ Membership information
             el, [("name", ""), ("key", "test"), ("key2", "val")]
         )
 
-        self.assertEqual(len(get_child_elements(el)), 1)
-        u = get_child_elements(el)[0]
+        self.assertEqual(len(dom_get_child_elements(el)), 1)
+        u = dom_get_child_elements(el)[0]
         self.assertEqual(u.tagName, "meta_attributes")
         self.assertEqual(u.getAttribute("id"), "test_id-meta_attributes")
-        self.assertEqual(len(get_child_elements(u)), 2)
+        self.assertEqual(len(dom_get_child_elements(u)), 2)
 
         self.assertEqual(
-            get_child_elements(u)[0].getAttribute("id"), "test_id-meta_attributes-key"
+            dom_get_child_elements(u)[0].getAttribute("id"),
+            "test_id-meta_attributes-key"
+        )
+        self.assertEqual(
+            dom_get_child_elements(u)[0].getAttribute("name"),
+            "key"
+        )
+        self.assertEqual(
+            dom_get_child_elements(u)[0].getAttribute("value"),
+            "test"
+        )
+        self.assertEqual(
+            dom_get_child_elements(u)[1].getAttribute("id"),
+            "test_id-meta_attributes-key2"
         )
-        self.assertEqual(get_child_elements(u)[0].getAttribute("name"), "key")
-        self.assertEqual(get_child_elements(u)[0].getAttribute("value"), "test")
         self.assertEqual(
-            get_child_elements(u)[1].getAttribute("id"), "test_id-meta_attributes-key2"
+            dom_get_child_elements(u)[1].getAttribute("name"),
+            "key2"
+        )
+        self.assertEqual(
+            dom_get_child_elements(u)[1].getAttribute("value"),
+            "val"
         )
-        self.assertEqual(get_child_elements(u)[1].getAttribute("name"), "key2")
-        self.assertEqual(get_child_elements(u)[1].getAttribute("value"), "val")
 
     def test_dom_update_meta_attr_update_remove(self):
         el = xml.dom.minidom.parseString("""
@@ -1957,13 +1984,20 @@ Membership information
             el, [("key", "another_val"), ("key2", "")]
         )
 
-        u = get_child_elements(el)[0]
-        self.assertEqual(len(get_child_elements(u)), 1)
+        u = dom_get_child_elements(el)[0]
+        self.assertEqual(len(dom_get_child_elements(u)), 1)
+        self.assertEqual(
+            dom_get_child_elements(u)[0].getAttribute("id"),
+            "test_id-meta_attributes-key"
+        )
         self.assertEqual(
-            get_child_elements(u)[0].getAttribute("id"), "test_id-meta_attributes-key"
+            dom_get_child_elements(u)[0].getAttribute("name"),
+            "key"
+        )
+        self.assertEqual(
+            dom_get_child_elements(u)[0].getAttribute("value"),
+            "another_val"
         )
-        self.assertEqual(get_child_elements(u)[0].getAttribute("name"), "key")
-        self.assertEqual(get_child_elements(u)[0].getAttribute("value"), "another_val")
 
     def test_get_utilization(self):
         el = xml.dom.minidom.parseString("""
@@ -2209,5 +2243,83 @@ Membership information
         )
         self.assertEqual(node.getAttribute("id"), node_id)
 
-if __name__ == "__main__":
-    unittest.main()
+class RunParallelTest(unittest.TestCase):
+    def fixture_create_worker(self, log, name, sleepSeconds=0):
+        def worker():
+            sleep(sleepSeconds)
+            log.append(name)
+        return worker
+
+    def test_run_all_workers(self):
+        log = []
+        utils.run_parallel(
+            [
+                self.fixture_create_worker(log, 'first'),
+                self.fixture_create_worker(log, 'second'),
+            ],
+            wait_seconds=.1
+        )
+
+        self.assertEqual(log, ['first', 'second'])
+
+    def test_wait_for_slower_workers(self):
+        log = []
+        utils.run_parallel(
+            [
+                self.fixture_create_worker(log, 'first', .03),
+                self.fixture_create_worker(log, 'second'),
+            ],
+            wait_seconds=.01
+        )
+
+        self.assertEqual(log, ['second', 'first'])
+
+class PrepareNodeNamesTest(unittest.TestCase):
+    def test_return_original_when_is_in_pacemaker_nodes(self):
+        node = 'test'
+        self.assertEqual(
+            node,
+            utils.prepare_node_name(node, {1: node}, {})
+        )
+
+    def test_return_original_when_is_not_in_corosync_nodes(self):
+        node = 'test'
+        self.assertEqual(
+            node,
+            utils.prepare_node_name(node, {}, {})
+        )
+
+    def test_return_original_when_corosync_id_not_in_pacemaker(self):
+        node = 'test'
+        self.assertEqual(
+            node,
+            utils.prepare_node_name(node, {}, {1: node})
+        )
+
+    def test_return_modified_name(self):
+        node = 'test'
+        self.assertEqual(
+            'another (test)',
+            utils.prepare_node_name(node, {1: 'another'}, {1: node})
+        )
+
+    def test_return_modified_name_with_pm_null_case(self):
+        node = 'test'
+        self.assertEqual(
+            '*Unknown* (test)',
+            utils.prepare_node_name(node, {1: '(null)'}, {1: node})
+        )
+
+class NodeActionTaskTest(unittest.TestCase):
+    def test_can_run_action(self):
+        def action(node, arg, kwarg=None):
+            return (0, ':'.join([node, arg, kwarg]))
+
+        report_list = []
+        def report(node, returncode, output):
+            report_list.append('|'.join([node, str(returncode), output]))
+
+        task = utils.create_task(report, action, 'node', 'arg', kwarg='kwarg')
+        task()
+
+        self.assertEqual(['node|0|node:arg:kwarg'], report_list)
diff --git a/pcs/test/tools/__init__.py b/pcs/test/tools/__init__.py
new file mode 100644
index 0000000..e69de29
diff --git a/pcs/test/tools/assertions.py b/pcs/test/tools/assertions.py
new file mode 100644
index 0000000..690a7d4
--- /dev/null
+++ b/pcs/test/tools/assertions.py
@@ -0,0 +1,188 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import doctest
+from lxml.doctestcompare import LXMLOutputChecker
+
+from pcs.lib.errors import LibraryError
+from pcs.test.tools.misc import prepare_diff
+
+def console_report(*lines):
+    #join the lines and append a trailing newline after the last one
+    return "\n".join(lines + ("",))
+
+class AssertPcsMixin(object):
+    """Run pcs command and assert its result"""
+
+    def assert_pcs_success(self, command, stdout_full=None, stdout_start=None):
+        full = stdout_full
+        if stdout_start is None and stdout_full is None:
+            full = ""
+        self.assert_pcs_result(
+            command,
+            stdout_full=full,
+            stdout_start=stdout_start
+        )
+
+    def assert_pcs_fail(self, command, stdout_full=None, stdout_start=None):
+        self.assert_pcs_result(
+            command,
+            stdout_full=stdout_full,
+            stdout_start=stdout_start,
+            returncode=1
+        )
+
+    def assert_pcs_result(
+        self, command, stdout_full=None, stdout_start=None, returncode=0
+    ):
+        msg = "Please specify exactly one: stdout_start or stdout_full"
+        if stdout_start is None and stdout_full is None:
+            raise Exception(msg + ", none specified")
+        if stdout_start is not None and stdout_full is not None:
+            raise Exception(msg + ", both specified")
+
+        stdout, pcs_returncode = self.pcs_runner.run(command)
+        self.assertEqual(
+            returncode,
+            pcs_returncode,
+            (
+                'Expected return code "{0}", but was "{1}"'
+                + '\ncommand: {2}\nstdout:\n{3}'
+            ).format(returncode, pcs_returncode, command, stdout)
+        )
+        message_template = (
+            "{reason}\ncommand: {cmd}\ndiff is (expected is 2nd):\n{diff}"
+            +
+            "\nFull stdout:\n{stdout}"
+        )
+        if stdout_start:
+            expected_start = self.__prepare_output(stdout_start)
+            if not stdout.startswith(expected_start):
+                self.assertTrue(
+                    False,
+                    message_template.format(
+                        reason="Stdout does not start as expected",
+                        cmd=command,
+                        diff=prepare_diff(
+                            stdout[:len(expected_start)], expected_start
+                        ),
+                        stdout=stdout
+                    )
+                )
+        else:
+            expected_full = self.__prepare_output(stdout_full)
+            if stdout != expected_full:
+                self.assertEqual(
+                    stdout,
+                    expected_full,
+                    message_template.format(
+                        reason="Stdout is not as expected",
+                        cmd=command,
+                        diff=prepare_diff(stdout, expected_full),
+                        stdout=stdout
+                    )
+                )
+
+    def __prepare_output(self, output):
+        if isinstance(output, list):
+            return console_report(*output)
+        return output
+
+
+def assert_xml_equal(expected_xml, got_xml):
+    checker = LXMLOutputChecker()
+    if not checker.check_output(expected_xml, got_xml, 0):
+        raise AssertionError(checker.output_difference(
+            doctest.Example("", expected_xml),
+            got_xml,
+            0
+        ))
+
+def assert_report_item_equal(real_report_item, report_item_info):
+    if not __report_item_equal(real_report_item, report_item_info):
+        raise AssertionError(
+            "ReportItem not equal\nexpected: {0}\nactual:   {1}"
+            .format(
+                repr((
+                    report_item_info[0],
+                    report_item_info[1],
+                    report_item_info[2],
+                    None if len(report_item_info) < 4 else report_item_info[3]
+                )),
+                repr((
+                    real_report_item.severity,
+                    real_report_item.code,
+                    real_report_item.info,
+                    real_report_item.forceable
+                ))
+            )
+        )
+
+def assert_report_item_list_equal(real_report_item_list, report_info_list):
+    for report_item in real_report_item_list:
+        report_info_list.remove(
+            __find_report_info(report_info_list, report_item)
+        )
+    if report_info_list:
+        raise AssertionError(
+            "LibraryError is missing expected ReportItems ("
+            +str(len(report_info_list))+"):\n"
+            + "\n".join(map(repr, report_info_list))
+
+            + "\nreal ReportItems ("+str(len(real_report_item_list))+"):\n"
+            + "\n".join(map(repr, real_report_item_list))
+        )
+
+def assert_raise_library_error(callableObj, *report_info_list):
+    if not report_info_list:
+        raise AssertionError(
+            "Raising LibraryError expected, but no report item specified."
+            + " Please specify report items, that you expect in LibraryError"
+        )
+    try:
+        callableObj()
+        raise AssertionError("LibraryError not raised")
+    except LibraryError as e:
+        assert_report_item_list_equal(e.args, list(report_info_list))
+
+def __find_report_info(report_info_list, report_item):
+    for report_info in report_info_list:
+        if __report_item_equal(report_item, report_info):
+            return report_info
+    raise AssertionError(
+        "Unexpected report given: \n{0} \nexpected reports are: \n{1}"
+        .format(
+            repr((
+                report_item.severity,
+                report_item.code,
+                report_item.info,
+                report_item.forceable
+            )),
+            "\n".join(map(repr, report_info_list))
+        )
+    )
+
+def __report_item_equal(real_report_item, report_item_info):
+    return (
+        real_report_item.severity == report_item_info[0]
+        and
+        real_report_item.code == report_item_info[1]
+        and
+        #checks only presence and match of expected in info,
+        #extra info is ignored
+        all(
+            (k in real_report_item.info and real_report_item.info[k] == v)
+            for k, v in report_item_info[2].items()
+        )
+        and
+        (
+            real_report_item.forceable == (
+                None if len(report_item_info) < 4 else report_item_info[3]
+            )
+        )
+    )
+
diff --git a/pcs/test/tools/color_text_runner.py b/pcs/test/tools/color_text_runner.py
new file mode 100644
index 0000000..305fe32
--- /dev/null
+++ b/pcs/test/tools/color_text_runner.py
@@ -0,0 +1,112 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import sys
+major, minor = sys.version_info[:2]
+if major == 2 and minor == 6:
+    import unittest2 as unittest
+else:
+    import unittest
+
+
+palete = {
+    "black": '\033[30m',
+    "red": '\033[31m',
+    "green": '\033[32m',
+    "orange": '\033[33m',
+    "blue": '\033[34m',
+    "purple": '\033[35m',
+    "cyan": '\033[36m',
+    "lightgrey": '\033[37m',
+    "darkgrey": '\033[90m',
+    "lightred": '\033[91m',
+    "lightgreen": '\033[92m',
+    "yellow": '\033[93m',
+    "lightblue": '\033[94m',
+    "pink": '\033[95m',
+    "lightcyan": '\033[96m',
+    "end" : '\033[0m',
+    "bold" : '\033[1m',
+    "underline" : '\033[4m',
+}
+
+def apply(key_list, text):
+    return("".join([palete[key] for key in key_list]) + text + palete["end"])
+
+TextTestResult = unittest.runner.TextTestResult
+#pylint: disable=bad-super-call
+class ColorTextTestResult(TextTestResult):
+    def addSuccess(self, test):
+        super(TextTestResult, self).addSuccess(test)
+        if self.showAll:
+            self.stream.writeln(apply(["green", "bold"], "OK"))
+        elif self.dots:
+            self.stream.write(apply(["green", "bold"], "."))
+            self.stream.flush()
+
+    def addError(self, test, err):
+        super(TextTestResult, self).addError(test, err)
+        if self.showAll:
+            self.stream.writeln(apply(["red", "bold"], "ERROR"))
+        elif self.dots:
+            self.stream.write(apply(["red", "bold"], 'E'))
+            self.stream.flush()
+
+    def addFailure(self, test, err):
+        super(TextTestResult, self).addFailure(test, err)
+        if self.showAll:
+            self.stream.writeln(apply(["lightred", "bold"], "FAIL"))
+        elif self.dots:
+            self.stream.write(apply(["lightred", "bold"], 'F'))
+            self.stream.flush()
+
+    def getDescription(self, test):
+        doc_first_line = test.shortDescription()
+        if self.descriptions and doc_first_line:
+            return '\n'.join((str(test), doc_first_line))
+        else:
+            module_parts = test.__class__.__module__.split(".")
+            module = module_parts[-1]
+            package = ".".join(module_parts[:-1])+"." if module_parts else ""
+
+            return (
+                test._testMethodName
+                +" "
+                +apply(["lightgrey"], "(")
+                +apply(["lightgrey"], package)
+                +apply(["bold"], module)
+                +"."
+                +test.__class__.__name__
+                +apply(["lightgrey"], ")")
+            )
+
+    def __format_test_name(self, test):
+        return (
+            test.__class__.__module__
+            + "." + test.__class__.__name__
+            + "." + test._testMethodName
+        )
+
+    def printErrors(self):
+        super(ColorTextTestResult, self).printErrors()
+        if not self.errors and not self.failures:
+            return
+
+        self.stream.writeln()
+        self.stream.writeln(self.separator1)
+        self.stream.writeln()
+        self.stream.writeln(
+            "for running failed tests only (errors are first then failures):"
+        )
+        self.stream.writeln()
+        self.stream.write(" \\\n".join(
+            [
+                self.__format_test_name(test)
+                for test, _ in self.errors + self.failures
+            ]
+        ))
+        self.stream.writeln()
diff --git a/pcs/test/tools/custom_mock.py b/pcs/test/tools/custom_mock.py
new file mode 100644
index 0000000..c038d28
--- /dev/null
+++ b/pcs/test/tools/custom_mock.py
@@ -0,0 +1,24 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+from pcs.lib.errors import LibraryError, ReportItemSeverity
+
+class MockLibraryReportProcessor(object):
+    def __init__(self):
+        self.report_item_list = []
+
+    def process(self, report_item):
+        self.process_list([report_item])
+
+    def process_list(self, report_item_list):
+        self.report_item_list.extend(report_item_list)
+        errors = [
+            item for item in report_item_list
+            if item.severity == ReportItemSeverity.ERROR
+        ]
+        if errors:
+            raise LibraryError(*errors)
diff --git a/pcs/test/tools/misc.py b/pcs/test/tools/misc.py
new file mode 100644
index 0000000..a78ccdc
--- /dev/null
+++ b/pcs/test/tools/misc.py
@@ -0,0 +1,52 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import difflib
+import os.path
+import re
+
+from pcs import utils
+
+
+testdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+
+def prepare_diff(first, second):
+    """
+    Return a string containing a diff of first and second
+    """
+    return "".join(
+        difflib.Differ().compare(first.splitlines(1), second.splitlines(1))
+    )
+
+def ac(a,b):
+    """
+    Compare the actual output 'a' and an expected output 'b', print diff b a
+    """
+    if a != b:
+        raise AssertionError(
+            "strings not equal:\n{0}".format(prepare_diff(b, a))
+        )
+
+def get_test_resource(name):
+    """Return full path to a test resource file specified by name"""
+    return os.path.join(testdir, "resources", name)
+
+def is_minimum_pacemaker_version(cmajor, cminor, crev):
+    output, dummy_retval = utils.run(["crm_mon", "--version"])
+    pacemaker_version = output.split("\n")[0]
+    r = re.compile(r"Pacemaker (\d+)\.(\d+)\.(\d+)")
+    m = r.match(pacemaker_version)
+    major = int(m.group(1))
+    minor = int(m.group(2))
+    rev = int(m.group(3))
+    return (
+        major > cmajor
+        or
+        (major == cmajor and minor > cminor)
+        or
+        (major == cmajor and minor == cminor and rev >= crev)
+    )
diff --git a/pcs/test/tools/pcs_mock.py b/pcs/test/tools/pcs_mock.py
new file mode 100644
index 0000000..d84ac67
--- /dev/null
+++ b/pcs/test/tools/pcs_mock.py
@@ -0,0 +1,13 @@
+try:
+    import unittest.mock as mock
+except ImportError:
+    import mock
+
+if not hasattr(mock.Mock, "assert_not_called"):
+    def __assert_not_called(self, *args, **kwargs):
+        if self.call_count != 0:
+            msg = ("Expected '%s' to not have been called. Called %s times." %
+                   (self._mock_name or 'mock', self.call_count))
+            raise AssertionError(msg)
+    mock.Mock.assert_not_called = __assert_not_called
+
diff --git a/pcs/test/tools/pcs_runner.py b/pcs/test/tools/pcs_runner.py
new file mode 100644
index 0000000..5f43cdc
--- /dev/null
+++ b/pcs/test/tools/pcs_runner.py
@@ -0,0 +1,78 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import os.path
+
+from pcs.test.tools.misc import get_test_resource as rc
+
+from pcs import utils
+
+__pcs_location = os.path.join(
+    os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))),
+    "pcs"
+)
+_temp_cib = rc("temp-cib.xml")
+
+
+class PcsRunner(object):
+    def __init__(
+        self, cib_file=_temp_cib, corosync_conf_file=None, cluster_conf_file=None
+    ):
+        self.cib_file = cib_file
+        self.corosync_conf_file = (
+            rc("corosync.conf") if corosync_conf_file is None
+            else corosync_conf_file
+        )
+        self.cluster_conf_file = (
+            rc("corosync.conf") if cluster_conf_file is None
+            else cluster_conf_file
+        )
+
+    def run(self, args):
+        args_with_files = (
+            "--corosync_conf={0} ".format(self.corosync_conf_file)
+            + "--cluster_conf={0} ".format(self.cluster_conf_file)
+            + args
+        )
+        return pcs(self.cib_file, args_with_files)
+
+
+def pcs(testfile, args = ""):
+    """
+    Run pcs with -f on specified file
+    Return tuple with:
+        shell stdoutdata
+        shell returncode
+    """
+    if args == "":
+        args = testfile
+        testfile = _temp_cib
+    arg_split = args.split()
+    arg_split_temp = []
+    in_quote = False
+    for arg in arg_split:
+        if in_quote:
+            arg_split_temp[-1] = arg_split_temp[-1] + " " + arg.replace("'", "")
+            if arg.find("'") != -1:
+                in_quote = False
+        else:
+            arg_split_temp.append(arg.replace("'", ""))
+            if arg.find("'") != -1 and not (arg[0] == "'" and arg[-1] == "'"):
+                in_quote = True
+
+    conf_opts = []
+    if "--corosync_conf" not in args:
+        corosync_conf = rc("corosync.conf")
+        conf_opts.append("--corosync_conf=" + corosync_conf)
+    if "--cluster_conf" not in args:
+        cluster_conf = rc("cluster.conf")
+        conf_opts.append("--cluster_conf=" + cluster_conf)
+    return utils.run(
+        [__pcs_location, "-f", testfile] + conf_opts + arg_split_temp
+    )
+
+
diff --git a/pcs/test/tools/xml.py b/pcs/test/tools/xml.py
new file mode 100644
index 0000000..e4a160d
--- /dev/null
+++ b/pcs/test/tools/xml.py
@@ -0,0 +1,51 @@
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
+
+import xml.dom.minidom
+from lxml import etree
+
+
+def dom_get_child_elements(element):
+    return [
+        child for child in element.childNodes
+        if child.nodeType == xml.dom.minidom.Node.ELEMENT_NODE
+    ]
+
+
+class XmlManipulation(object):
+    @classmethod
+    def from_file(cls, file_name):
+        return cls(etree.parse(file_name).getroot())
+
+    @classmethod
+    def from_str(cls, string):
+        return cls(etree.fromstring(string))
+
+    def __init__(self, tree):
+        self.tree = tree
+
+    def __append_to_child(self, element, xml_string):
+        element.append(etree.fromstring(xml_string))
+
+    def append_to_first_tag_name(self, tag_name, *xml_string_list):
+        for xml_string in xml_string_list:
+            self.__append_to_child(
+                self.tree.find(".//{0}".format(tag_name)), xml_string
+            )
+        return self
+
+    def __str__(self):
+        #etree returns string in bytes: b'xml'
+        #python 3 removed .encode() from byte strings
+        #run(...) calls subprocess.Popen.communicate which calls encode...
+        #so there is bytes to str conversion
+        return etree.tostring(self.tree).decode()
+
+
+def get_xml_manipulation_creator_from_file(file_name):
+    return lambda: XmlManipulation.from_file(file_name)
+
diff --git a/pcs/usage.py b/pcs/usage.py
index f412ce9..50e1fd1 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -1,14 +1,15 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import re
 
 
 examples = ""
 def full_usage():
-    global examples
     out = ""
     out += main(False)
     out += strip_extras(resource([],False))
@@ -16,7 +17,9 @@ def full_usage():
     out += strip_extras(stonith([],False))
     out += strip_extras(property([],False))
     out += strip_extras(constraint([],False))
+    out += strip_extras(node([],False))
     out += strip_extras(acl([],False))
+    out += strip_extras(quorum([],False))
     out += strip_extras(status([],False))
     out += strip_extras(config([],False))
     out += strip_extras(pcsd([],False))
@@ -97,37 +100,7 @@ def dict_depth(d, depth=0):
         return depth
     return max(dict_depth(v, depth+1) for k, v in d.items())
 
-def sub_gen_code(level,item,prev_level=[],spaces=""):
-    out = ""
-
-    if dict_depth(item) <= level:
-        return ""
-
-    out += 'case "${cur' + str(level) + '}" in\n'
-    next_level = []
-    for key,val in item.items():
-        if len(val) == 0:
-            continue
-        values = " ".join(val.keys())
-        values = values.replace("|"," ")
-        out += "  " + key + ")\n"
-        if len(val) > 0 and level != 1:
-            out += sub_gen_code(level-1,item[key],[] ,spaces + "  ")
-        else:
-            out += "    " + 'COMPREPLY=($(compgen -W "' + values + '" -- ${cur}))\n'
-            out += "    return 0\n"
-        out += "    ;;\n"
-    out += "  *)\n"
-    out += "  ;;\n"
-    out += 'esac\n'
-    temp = out.split('\n')
-    new_out = ""
-    for l in temp:
-        new_out += spaces + l + "\n"
-    return new_out
-
-
-def sub_generate_bash_completion():
+def generate_completion_tree_from_usage():
     tree = {}
     tree["resource"] = generate_tree(resource([],False))
     tree["cluster"] = generate_tree(cluster([],False))
@@ -135,39 +108,16 @@ def sub_generate_bash_completion():
     tree["property"] = generate_tree(property([],False))
     tree["acl"] = generate_tree(acl([],False))
     tree["constraint"] = generate_tree(constraint([],False))
+    tree["quorum"] = generate_tree(quorum([],False))
     tree["status"] = generate_tree(status([],False))
     tree["config"] = generate_tree(config([],False))
     tree["pcsd"] = generate_tree(pcsd([],False))
     tree["node"] = generate_tree(node([], False))
-    print("""
-    _pcs()
-    {
-    local cur cur1 cur2 cur3
-    COMPREPLY=()
-    cur="${COMP_WORDS[COMP_CWORD]}"
-    if [ "$COMP_CWORD" -gt "0" ]; then cur1="${COMP_WORDS[COMP_CWORD-1]}";fi
-    if [ "$COMP_CWORD" -gt "1" ]; then cur2="${COMP_WORDS[COMP_CWORD-2]}";fi
-    if [ "$COMP_CWORD" -gt "2" ]; then cur3="${COMP_WORDS[COMP_CWORD-3]}";fi
-
-    """)
-    print(sub_gen_code(3,tree,[]))
-    print(sub_gen_code(2,tree,[]))
-    print(sub_gen_code(1,tree,[]))
-    print("""
-    if [ $COMP_CWORD -eq 1 ]; then
-        COMPREPLY=( $(compgen -W "resource cluster stonith property acl constraint status config pcsd node" -- $cur) )
-    fi
-    return 0
-
-    }
-    complete -F _pcs pcs
-    """)
-
+    return tree
 
 def generate_tree(usage_txt):
     ignore = True
     ret_hash = {}
-    cur_stack = []
     for l in usage_txt.split('\n'):
         if l.startswith("Commands:"):
             ignore = False
@@ -211,6 +161,7 @@ Commands:
     constraint  Set resource constraints
     property    Set pacemaker properties
     acl         Set pacemaker access control lists
+    quorum      Manage cluster quorum settings
     status      View cluster status
     config      View and manage cluster configuration
     pcsd        Manage pcs daemon
@@ -230,7 +181,7 @@ Usage: pcs resource [commands]...
 Manage pacemaker resources
 
 Commands:
-    show [resource id] [--full] [--groups]
+    [show [resource id]] [--full] [--groups]
         Show all currently configured resources or if a resource is specified
         show the options for the configured resource.  If --full is specified
         all configured resource options will be displayed.  If --groups is
@@ -474,13 +425,14 @@ Commands:
         Set default values for resources, if no options are passed, lists
         currently configured defaults
 
-    cleanup [<resource id>]
-        Cleans up the resource in the lrmd (useful to reset the resource
-        status and failcount).  This tells the cluster to forget the
-        operation history of a resource and re-detect its current state.
-        This can be useful to purge knowledge of past failures that have
-        since been resolved. If a resource id is not specified then all
-        resources/stonith devices will be cleaned up.
+    cleanup [<resource id>] [--node <node>]
+        Cleans up the resource in the lrmd (useful to reset the resource status
+        and failcount).  This tells the cluster to forget the operation history
+        of a resource and re-detect its current state.  This can be useful to
+        purge knowledge of past failures that have since been resolved.  If a
+        resource id is not specified then all resources/stonith devices will be
+        cleaned up.  If a node is not specified then resources on all nodes
+        will be cleaned up.
 
     failcount show <resource id> [node]
         Show current failcount for specified resource from all nodes or
@@ -574,8 +526,9 @@ Commands:
         the remote nodes with each other).  Using --force forces
         re-authentication to occur.
 
-    setup [--start] [--local] [--enable] --name <cluster name> <node1[,node1-altaddr]>
-            [node2[,node2-altaddr]] [..] [--transport <udpu|udp>] [--rrpmode active|passive]
+    setup [--start [--wait[=<n>]]] [--local] [--enable] --name <cluster name>
+            <node1[,node1-altaddr]> [<node2[,node2-altaddr]>] [...]
+            [--transport udpu|udp] [--rrpmode active|passive]
             [--addr0 <addr/net> [[[--mcast0 <address>] [--mcastport0 <port>]
                             [--ttl0 <ttl>]] | [--broadcast0]]
             [--addr1 <addr/net> [[[--mcast1 <address>] [--mcastport1 <port>]
@@ -588,6 +541,7 @@ Commands:
         Configure corosync and sync configuration out to listed nodes.
         --local will only perform changes on the local node,
         --start will also start the cluster on the specified nodes,
+        --wait will wait up to 'n' seconds for the nodes to start,
         --enable will enable corosync and pacemaker on node startup,
         --transport allows specification of corosync transport (default: udpu;
             udp for CMAN clusters),
@@ -630,11 +584,12 @@ Commands:
         ttl defaults to 1. If --broadcast is specified, --mcast0/1,
         --mcastport0/1 & --ttl0/1 are ignored.
 
-    start [--all] [node] [...]
+    start [--all] [node] [...] [--wait[=<n>]]
         Start corosync & pacemaker on specified node(s), if a node is not
         specified then corosync & pacemaker are started on the local node.
         If --all is specified then corosync & pacemaker are started on all
-        nodes.
+        nodes.  If --wait is specified, wait up to 'n' seconds for nodes
+        to start.
 
     stop [--all] [node] [...]
         Stop corosync & pacemaker on specified node(s), if a node is not
@@ -659,18 +614,6 @@ Commands:
         pacemaker are disabled on all nodes. (Note: this is the default after
         installation)
 
-    standby [<node>] | --all
-        Put specified node into standby mode (the node specified will no longer
-        be able to host resources), if no node or options are specified the
-        current node will be put into standby mode, if --all is specified all
-        nodes will be put into standby mode.
-
-    unstandby [<node>] | --all
-        Remove node from standby mode (the node specified will now be able to
-        host resources), if no node or options are specified the current node
-        will be removed from standby mode, if --all is specified all nodes will
-        be removed from standby mode.
-
     remote-node add <hostname> <resource id> [options]
         Enables the specified resource as a remote-node resource on the
         specified hostname (hostname should be the same as 'uname -n')
@@ -733,11 +676,12 @@ Commands:
         --config is recommended.  Do not specify a scope if you need to edit
         the whole CIB or be warned in the case of outdated CIB.
 
-    node add <node[,node-altaddr]> [--start] [--enable]
+    node add <node[,node-altaddr]> [--start [--wait[=<n>]]] [--enable]
         Add the node to corosync.conf and corosync on all nodes in the cluster
-        and sync the new corosync.conf to the new node.  If --start is specified
-        also start corosync/pacemaker on the new node, if --enable is specified
-        enable corosync/pacemaker on new node.
+        and sync the new corosync.conf to the new node.  If --start is
+        specified also start corosync/pacemaker on the new node, if --wait is
+        specified wait up to 'n' seconds for the new node to start.  If --enable
+        is specified enable corosync/pacemaker on new node.
         When using Redundant Ring Protocol (RRP) with udpu transport, specify
         the ring 0 address first followed by a ',' and then the ring 1 address.
 
@@ -795,7 +739,7 @@ Usage: pcs stonith [commands]...
 Configure fence devices for use with pacemaker
 
 Commands:
-    show [stonith id] [--full]
+    [show [stonith id]] [--full]
         Show all currently configured stonith devices or if a stonith id is
         specified show the options for the configured stonith device.  If
         --full is specified all configured stonith options will be displayed
@@ -819,13 +763,14 @@ Commands:
     delete <stonith id>
         Remove stonith id from configuration
 
-    cleanup [<stonith id>]
-        Cleans up the stonith device in the lrmd (useful to reset the
-        status and failcount).  This tells the cluster to forget the
-        operation history of a stonith device and re-detect its current state.
-        This can be useful to purge knowledge of past failures that have
-        since been resolved. If a stonith id is not specified then all
-        resources/stonith devices will be cleaned up.
+    cleanup [<stonith id>] [--node <node>]
+        Cleans up the stonith device in the lrmd (useful to reset the status
+        and failcount).  This tells the cluster to forget the operation history
+        of a stonith device and re-detect its current state.  This can be
+        useful to purge knowledge of past failures that have since been
+        resolved.  If a stonith id is not specified then all resources/stonith
+        devices will be cleaned up.  If a node is not specified then resources
+        on all nodes will be cleaned up.
 
     level
         Lists all of the fencing levels currently configured
@@ -877,7 +822,7 @@ Usage: pcs property [commands]...
 Configure pacemaker properties
 
 Commands:
-    list|show [<property> | --all | --defaults]
+    [list|show [<property> | --all | --defaults]] | [--all | --defaults]
         List property settings (default: lists configured properties).
         If --defaults is specified will show all property defaults, if --all
         is specified, current configured properties will be shown with unset
@@ -969,8 +914,8 @@ Commands:
         Available options are kind=Optional/Mandatory/Serialize,
         symmetrical=true/false, require-all=true/false and id=<constraint-id>.
 
-    order set <resource1> <resource2> [resourceN]... [options] [set
-              <resourceX> <resourceY> ... [options]]
+    order set <resource1> [resourceN]... [options] [set
+              <resourceX> ... [options]]
               [setoptions [constraint_options]]
         Create an ordered set of resources.
         Available options are sequential=true/false, require-all=true/false,
@@ -996,8 +941,8 @@ Commands:
         A role can be master or slave (if no role is specified, it defaults to
         'started').
 
-    colocation set <resource1> <resource2> [resourceN]... [options]
-               [set <resourceX> <resourceY> ... [options]]
+    colocation set <resource1> [resourceN]... [options]
+               [set <resourceX> ... [options]]
                [setoptions [constraint_options]]
         Create a colocation constraint with a resource set.
         Available options are sequential=true/false, require-all=true/false,
@@ -1008,6 +953,25 @@ Commands:
     colocation remove <source resource id> <target resource id>
         Remove colocation constraints with <source resource>
 
+    ticket show [--full]
+        List all current ticket constraints (if --full is specified show
+        the internal constraint id's as well).
+
+    ticket set <resource1> [resourceN]... [options]
+               [set <resourceX> ... [options]]
+               [setoptions [constraint_options]]
+        Create a ticket constraint with a resource set.
+        Available options are sequential=true/false, require-all=true/false,
+        action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.
+        Required constraint option is ticket.
+        Optional constraint option is loss-policy=fence/stop/freeze/demote
+
+    ticket add <ticket> [<role>] <resource id> [options]
+               [id=constraint-id]
+        Create a ticket constraint for <resource id>.
+        Available option is loss-policy=fence/stop/freeze/demote.
+        A role can be master, slave, started or stopped.
+
     remove [constraint id]...
         Remove constraint(s) or constraint rules with the specified id(s)
 
@@ -1230,16 +1194,36 @@ Usage: pcs node <command>
 Manage cluster nodes
 
 Commands:
-    maintenance [--all] | [node]...
+    maintenance [--all] | [<node>]...
         Put specified node(s) into maintenance mode, if no node or options are
         specified the current node will be put into maintenance mode, if --all
         is specified all nodes will be put into maintenace mode.
 
-    unmaintenance [--all] | [node]...
+    unmaintenance [--all] | [<node>]...
         Remove node(s) from maintenance mode, if no node or options are
         specified the current node will be removed from maintenance mode,
         if --all is specified all nodes will be removed from maintenance mode.
 
+    standby [--all | <node>] [--wait[=n]]
+        Put specified node into standby mode (the node specified will no longer
+        be able to host resources), if no node or options are specified the
+        current node will be put into standby mode, if --all is specified all
+        nodes will be put into standby mode.
+        If --wait is specified, pcs will wait up to 'n' seconds for the node(s)
+        to be put into standby mode and then return 0 on success or 1 if
+        the operation has not succeeded yet.  If 'n' is not specified it
+        to 60 minutes.
+
+    unstandby [--all | <node>] [--wait[=n]]
+        Remove node from standby mode (the node specified will now be able to
+        host resources), if no node or options are specified the current node
+        will be removed from standby mode, if --all is specified all nodes will
+        be removed from standby mode.
+        If --wait is specified, pcs will wait up to 'n' seconds for the node(s)
+        to be removed from standby mode and then return 0 on success or 1 if
+        the operation has not succeeded yet.  If 'n' is not specified it
+        to 60 minutes.
+
     utilization [<node> [<name>=<value> ...]]
         Add specified utilization options to specified node. If node is not
         specified, shows utilization of all nodes. If utilization options are
@@ -1252,3 +1236,53 @@ Commands:
         print(sub_usage(args, output))
     else:
         return output
+
+def quorum(args=[], pout=True):
+    output = """
+Usage: pcs quorum <command>
+Manage cluster quorum settings
+
+Commands:
+    config
+        Show quorum configuration.
+
+    device add [generic options] model <device model> [model options]
+        Add quorum device to cluster.
+
+    device remove
+        Remove quorum device from cluster.
+
+    device update [generic options] [model <model options>]
+        Add/Change quorum device options.  Requires cluster to be stopped.
+
+    update [auto_tie_breaker=[0|1]] [last_man_standing=[0|1]]
+            [last_man_standing_window=[<time in ms>]] [wait_for_all=[0|1]]
+        Add/Change quorum options.  At least one option must be specified.
+        Options are documented in corosync's votequorum(5) man page.  Requires
+        cluster to be stopped.
+"""
+    if pout:
+        print(sub_usage(args, output))
+    else:
+        return output
+
+def show(main_usage_name, rest_usage_names):
+    usage_map = {
+        "acl": acl,
+        "cluster": cluster,
+        "config": config,
+        "constraint": constraint,
+        "node": node,
+        "pcsd": pcsd,
+        "property": property,
+        "quorum": quorum,
+        "resource": resource,
+        "status": status,
+        "stonith": stonith,
+    }
+    if main_usage_name not in usage_map:
+        raise Exception(
+            "Bad usage name '{0}' there can be '{1}'"
+            .format(main_usage_name,  list(usage_map.keys()))
+        )
+    usage_map[main_usage_name](rest_usage_names)
diff --git a/pcs/utils.py b/pcs/utils.py
index 18daa6e..9041fd4 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -1,7 +1,9 @@
-from __future__ import absolute_import
-from __future__ import division
-from __future__ import print_function
-from __future__ import unicode_literals
+from __future__ import (
+    absolute_import,
+    division,
+    print_function,
+    unicode_literals,
+)
 
 import os
 import sys
@@ -18,13 +20,11 @@ import signal
 import time
 from io import BytesIO
 import tarfile
-import fcntl
 import getpass
 import base64
+import threading
+import logging
 
-
-from errors import ReportItem
-from errors import ReportItemSeverity
 try:
     # python2
     from urllib import urlencode as urllib_urlencode
@@ -54,10 +54,33 @@ except ImportError:
         URLError as urllib_URLError
     )
 
-import settings
-import resource
-import cluster
-import corosync_conf as corosync_conf_utils
+
+from pcs import settings, usage
+from pcs.cli.common.reports import (
+    process_library_reports as process_lib_reports,
+    LibraryReportProcessorToConsole as LibraryReportProcessorToConsole,
+)
+from pcs.common.tools import simple_cache
+from pcs.lib.env import LibraryEnvironment
+from pcs.lib.errors import LibraryError
+import pcs.lib.corosync.config_parser as corosync_conf_parser
+from pcs.lib.external import (
+    is_cman_cluster,
+    CommandRunner,
+)
+import pcs.lib.resource_agent as lib_ra
+from pcs.lib.corosync.config_facade import ConfigFacade as corosync_conf_facade
+from pcs.lib.pacemaker import has_resource_wait_support
+from pcs.lib.pacemaker_state import ClusterState
+from pcs.lib.pacemaker_values import(
+    validate_id,
+    is_boolean,
+    timeout_to_seconds as get_timeout_seconds,
+    is_score_value,
+)
+from pcs.cli.common import middleware
+from pcs.cli.common.env import Env
+from pcs.cli.common.lib_wrapper import Library
 
 
 PYTHON2 = sys.version[0] == "2"
@@ -68,22 +91,10 @@ filename = ""
 pcs_options = {}
 fence_bin = settings.fence_agent_binaries
 
-score_regexp = re.compile(r'^[+-]?((INFINITY)|(\d+))$')
-
-CIB_BOOLEAN_TRUE = ["true", "on", "yes", "y", "1"]
-CIB_BOOLEAN_FALSE = ["false", "off", "no", "n", "0"]
 
 class UnknownPropertyException(Exception):
     pass
 
-def simple_cache(func):
-    cache = {}
-    def wrapper(*args):
-        if args not in cache:
-            cache[args] = func()
-        return cache[args]
-    return wrapper
-
 def getValidateWithVersion(dom):
     cib = dom.getElementsByTagName("cib")
     if len(cib) != 1:
@@ -106,9 +117,16 @@ def checkAndUpgradeCIB(major,minor,rev):
     if cmajor > major or (cmajor == major and cminor > minor) or (cmajor == major and cminor == minor and crev >= rev):
         return False
     else:
-        cluster.cluster_upgrade()
+        cluster_upgrade()
         return True
 
+def cluster_upgrade():
+    output, retval = run(["cibadmin", "--upgrade", "--force"])
+    if retval != 0:
+        err("unable to upgrade cluster: %s" % output)
+    print("Cluster CIB has been upgraded to latest version")
+
+
 # Check status of node
 def checkStatus(node):
     return sendHTTPRequest(node, 'remote/status', None, False, False)
@@ -214,9 +232,24 @@ def setCorosyncConfig(node,config):
         if status != 0:
             err("Unable to set corosync config: {0}".format(data))
 
+def getPacemakerNodeStatus(node):
+    return sendHTTPRequest(
+        node, "remote/pacemaker_node_status", None, False, False
+    )
+
 def startCluster(node, quiet=False):
     return sendHTTPRequest(node, 'remote/cluster_start', None, False, not quiet)
 
+def stopPacemaker(node, quiet=False, force=True):
+    return stopCluster(
+        node, pacemaker=True, corosync=False, quiet=quiet, force=force
+    )
+
+def stopCorosync(node, quiet=False, force=True):
+    return stopCluster(
+        node, pacemaker=False, corosync=True, quiet=quiet, force=force
+    )
+
 def stopCluster(node, quiet=False, pacemaker=True, corosync=True, force=True):
     data = dict()
     if pacemaker and not corosync:
@@ -242,12 +275,12 @@ def restoreConfig(node, tarball_data):
     return sendHTTPRequest(node, "remote/config_restore", data, False, True)
 
 def pauseConfigSyncing(node, delay_seconds=300):
-  data = urllib_urlencode({"sync_thread_pause": delay_seconds})
-  return sendHTTPRequest(node, "remote/set_sync_options", data, False, False)
+    data = urllib_urlencode({"sync_thread_pause": delay_seconds})
+    return sendHTTPRequest(node, "remote/set_sync_options", data, False, False)
 
 def resumeConfigSyncing(node):
-  data = urllib_urlencode({"sync_thread_resume": 1})
-  return sendHTTPRequest(node, "remote/set_sync_options", data, False, False)
+    data = urllib_urlencode({"sync_thread_resume": 1})
+    return sendHTTPRequest(node, "remote/set_sync_options", data, False, False)
 
 def canAddNodeToCluster(node):
     retval, output = sendHTTPRequest(
@@ -365,6 +398,9 @@ def sendHTTPRequest(host, request, data = None, printResult = True, printSuccess
     except urllib_HTTPError as e:
         if "--debug" in pcs_options:
             print("Response Code: " + str(e.code))
+            html = e.read().decode("utf-8")
+            print("--Debug Response Start--\n{0}".format(html), end="")
+            print("--Debug Response End--")
         if e.code == 401:
             output = (
                 3,
@@ -413,13 +449,13 @@ def getNodesFromCorosyncConf(conf_text=None):
     return nodes
 
 def getNodesFromPacemaker():
-    ret_nodes = []
-    root = get_cib_etree()
-    nodes = root.findall(str(".//node"))
-    for node in nodes:
-        ret_nodes.append(node.attrib["uname"])
-    ret_nodes.sort()
-    return ret_nodes
+    try:
+        return [
+            node.attrs.name
+            for node in ClusterState(getClusterStateXml()).node_section.nodes
+        ]
+    except LibraryError as e:
+        process_library_reports(e.args)
 
 def getCorosyncConf(conf=None):
     if not conf:
@@ -441,8 +477,8 @@ def getCorosyncConfParsed(conf=None, text=None):
         except xml.parsers.expat.ExpatError as e:
             err("Unable to parse cluster.conf: %s" % e)
     try:
-        return corosync_conf_utils.parse_string(conf_text)
-    except corosync_conf_utils.CorosyncConfException as e:
+        return corosync_conf_parser.parse_string(conf_text)
+    except corosync_conf_parser.CorosyncConfParserException as e:
         err("Unable to parse corosync.conf: %s" % e)
 
 def setCorosyncConf(corosync_config, conf_file=None):
@@ -519,13 +555,10 @@ def getCorosyncActiveNodes():
 def addNodeToCorosync(node):
 # Before adding, make sure node isn't already in corosync.conf
     node0, node1 = parse_multiring_node(node)
-    used_node_ids = []
-    num_nodes_in_conf = 0
     corosync_conf_text = getCorosyncConf()
     for c_node in getNodesFromCorosyncConf(conf_text=corosync_conf_text):
         if (c_node == node0) or (c_node == node1):
             err("node already exists in corosync.conf")
-        num_nodes_in_conf = num_nodes_in_conf + 1
     if "--corosync_conf" not in pcs_options:
         for c_node in getCorosyncActiveNodes():
             if (c_node == node0) or (c_node == node1):
@@ -537,7 +570,7 @@ def addNodeToCorosync(node):
     if not nodelists:
         err("unable to find nodelist in corosync.conf")
     nodelist = nodelists[0]
-    new_node = corosync_conf_utils.Section("node")
+    new_node = corosync_conf_parser.Section("node")
     nodelist.add_section(new_node)
     new_node.add_attribute("ring0_addr", node0)
     if node1:
@@ -598,13 +631,11 @@ def addNodeToClusterConf(node):
 
 def removeNodeFromCorosync(node):
     removed_node = False
-    num_nodes_in_conf = 0
     node0, node1 = parse_multiring_node(node)
 
     corosync_conf = getCorosyncConfParsed()
     for nodelist in corosync_conf.get_sections("nodelist"):
         for node in nodelist.get_sections("node"):
-            num_nodes_in_conf += 1
             ring0_attrs = node.get_attributes("ring0_addr")
             if ring0_attrs:
                 ring0_conf = ring0_attrs[0][1]
@@ -619,7 +650,7 @@ def removeNodeFromCorosync(node):
     return removed_node
 
 def removeNodeFromClusterConf(node):
-    node0, node1 = parse_multiring_node(node)
+    node0, dummy_node1 = parse_multiring_node(node)
     nodes = getNodesFromCorosyncConf()
     if node0 not in nodes:
         return False
@@ -645,27 +676,9 @@ def removeNodeFromClusterConf(node):
     return True
 
 def autoset_2node_corosync(corosync_conf):
-    node_count = 0
-    auto_tie_breaker = False
-
-    for nodelist in corosync_conf.get_sections("nodelist"):
-        node_count += len(nodelist.get_sections("node"))
-    quorum_sections = corosync_conf.get_sections("quorum")
-    for quorum in quorum_sections:
-        for attr in quorum.get_attributes("auto_tie_breaker"):
-            auto_tie_breaker = attr[1] == "1"
-
-    if node_count == 2 and not auto_tie_breaker:
-        for quorum in quorum_sections:
-            quorum.set_attribute("two_node", "1")
-        if not quorum_sections:
-            quorum = corosync_conf_utils.Section("quorum")
-            quorum.add_attribute("two_node", "1")
-            corosync_conf.add_section(quorum)
-    else:
-        for quorum in quorum_sections:
-            quorum.del_attributes_by_name("two_node")
-    return corosync_conf
+    facade = corosync_conf_facade(corosync_conf)
+    facade._ConfigFacade__update_two_node()
+    return facade.config
 
 def getNextNodeID(corosync_conf):
     currentNodes = []
@@ -749,6 +762,7 @@ def subprocess_setup():
     signal.signal(signal.SIGPIPE, signal.SIG_DFL)
 
 # Run command, with environment and return (output, retval)
+# DEPRECATED, please use lib.external.CommandRunner via utils.cmd_runner()
 def run(
     args, ignore_stderr=False, string_for_stdin=None, env_extend=None,
     binary_output=False
@@ -798,7 +812,7 @@ def run(
             # decodes newlines and in python3 also converts bytes to str
             universal_newlines=(not PYTHON2 and not binary_output)
         )
-        output,stderror = p.communicate(string_for_stdin)
+        output, dummy_stderror = p.communicate(string_for_stdin)
         returnVal = p.returncode
         if "--debug" in pcs_options:
             print("Return Value: {0}".format(returnVal))
@@ -811,6 +825,18 @@ def run(
 
     return output, returnVal
 
+ at simple_cache
+def cmd_runner():
+    env_vars = dict()
+    if usefile:
+        env_vars["CIB_file"] = filename
+    env_vars.update(os.environ)
+    return CommandRunner(
+        logging.getLogger("old_cli"),
+        get_report_processor(),
+        env_vars
+    )
+
 def run_pcsdcli(command, data=None):
     if not data:
         data = dict()
@@ -843,6 +869,62 @@ def run_pcsdcli(command, data=None):
         }
     return output_json, retval
 
+def auth_nodes_do(nodes, username, password, force, local):
+    pcsd_data = {
+        'nodes': list(set(nodes)),
+        'username': username,
+        'password': password,
+        'force': force,
+        'local': local,
+    }
+    output, retval = run_pcsdcli('auth', pcsd_data)
+    if retval == 0 and output['status'] == 'access_denied':
+        err('Access denied')
+    if retval == 0 and output['status'] == 'ok' and output['data']:
+        failed = False
+        try:
+            if not output['data']['sync_successful']:
+                err(
+                    "Some nodes had a newer tokens than the local node. "
+                    + "Local node's tokens were updated. "
+                    + "Please repeat the authentication if needed."
+                )
+            for node, result in output['data']['auth_responses'].items():
+                if result['status'] == 'ok':
+                    print("{0}: Authorized".format(node))
+                elif result['status'] == 'already_authorized':
+                    print("{0}: Already authorized".format(node))
+                elif result['status'] == 'bad_password':
+                    err(
+                        "{0}: Username and/or password is incorrect".format(node),
+                        False
+                    )
+                    failed = True
+                elif result['status'] == 'noresponse':
+                    err("Unable to communicate with {0}".format(node), False)
+                    failed = True
+                else:
+                    err("Unexpected response from {0}".format(node), False)
+                    failed = True
+            if output['data']['sync_nodes_err']:
+                err(
+                    (
+                        "Unable to synchronize and save tokens on nodes: {0}. "
+                        + "Are they authorized?"
+                    ).format(
+                        ", ".join(output['data']['sync_nodes_err'])
+                    ),
+                    False
+                )
+                failed = True
+        except:
+            err('Unable to communicate with pcsd')
+        if failed:
+            sys.exit(1)
+        return
+    err('Unable to communicate with pcsd')
+
+
 def call_local_pcsd(argv, interactive_auth=False, std_in=None):
     # some commands cannot be run under a non-root account
     # so we pass those commands to locally running pcsd to execute them
@@ -862,7 +944,7 @@ def call_local_pcsd(argv, interactive_auth=False, std_in=None):
         print('Please authenticate yourself to the local pcsd')
         username = get_terminal_input('Username: ')
         password = get_terminal_password()
-        cluster.auth_nodes_do(["localhost"], username, password, True, True)
+        auth_nodes_do(["localhost"], username, password, True, True)
         print()
         code, output = sendHTTPRequest(
             "localhost", "run_pcs", data_send, False, False
@@ -904,29 +986,66 @@ def map_for_error_list(callab, iterab):
             error_list.append(err)
     return error_list
 
-def run_node_threads(node_threads):
-    error_list = []
-    for node, thread in node_threads.items():
+def run_parallel(worker_list, wait_seconds=1):
+    thread_list = []
+    for worker in worker_list:
+        thread = threading.Thread(target=worker)
         thread.daemon = True
         thread.start()
-    while node_threads:
-        for node in list(node_threads.keys()):
-            thread = node_threads[node]
-            thread.join(1)
-            if thread.is_alive():
-                continue
-            output = node + ": " + thread.output.strip()
-            print(output)
-            if thread.retval != 0:
-                error_list.append(output)
-            del node_threads[node]
+        thread_list.append(thread)
+
+    while thread_list:
+        for thread in thread_list:
+            thread.join(wait_seconds)
+            if not thread.is_alive():
+                thread_list.remove(thread)
+
+def create_task(report, action, node, *args, **kwargs):
+    def worker():
+        returncode, output = action(node, *args, **kwargs)
+        report(node, returncode, output)
+    return worker
+
+def create_task_list(report, action, node_list, *args, **kwargs):
+    return [
+        create_task(report, action, node, *args, **kwargs) for node in node_list
+    ]
+
+def parallel_for_nodes(action, node_list, *args, **kwargs):
+    error_list = []
+    def report(node, returncode, output):
+        message = '{0}: {1}'.format(node, output.strip())
+        print(message)
+        if returncode != 0:
+            error_list.append(message)
+    run_parallel(
+        create_task_list(report, action, node_list, *args, **kwargs)
+    )
     return error_list
 
+def prepare_node_name(node, pm_nodes, cs_nodes):
+    '''
+    Return pacemaker-corosync combined name for node if needed
+    pm_nodes dictionary pacemaker nodes id:node_name
+    cs_nodes dictionary corosync nodes id:node_name
+    '''
+    if node in pm_nodes.values():
+        return node
+
+    for cs_id, cs_name in cs_nodes.items():
+        if node == cs_name and cs_id in pm_nodes:
+            return '{0} ({1})'.format(
+                pm_nodes[cs_id] if pm_nodes[cs_id] != '(null)' else "*Unknown*",
+                node
+            )
+
+    return node
+
 # Check is something exists in the CIB, if it does return it, if not, return
 #  an empty string
 def does_exist(xpath_query):
     args = ["cibadmin", "-Q", "--xpath", xpath_query]
-    output,retval = run(args)
+    dummy_output,retval = run(args)
     if (retval != 0):
         return False
     return True
@@ -1060,6 +1179,8 @@ def dom_get_resource_masterslave(dom, resource_id):
     return None
 
 # returns tuple (is_valid, error_message, correct_resource_id_if_exists)
+# there is a duplicate code in pcs/lib/cib/constraint/constraint.py
+# please use function in pcs/lib/cib/constraint/constraint.py
 def validate_constraint_resource(dom, resource_id):
     resource_el = (
         dom_get_clone(dom, resource_id)
@@ -1160,7 +1281,7 @@ def dom_get_parent_by_tag_name(dom_el, tag_name):
 def dom_attrs_to_list(dom_el, with_id=False):
     attributes = [
         "%s=%s" % (name, value)
-        for name, value in dom_el.attributes.items() if name != "id"
+        for name, value in sorted(dom_el.attributes.items()) if name != "id"
     ]
     if with_id:
         attributes.append("(id:%s)" % (dom_el.getAttribute("id")))
@@ -1261,63 +1382,41 @@ def does_resource_have_options(ra_type):
 # Given a resource agent (ocf:heartbeat:XXX) return an list of default
 # operations or an empty list if unable to find any default operations
 def get_default_op_values(ra_type):
-    allowable_operations = ["monitor","start","stop","promote","demote"]
-    ra_split = ra_type.split(':')
-    if len(ra_split) != 3:
-        return []
-
-    ra_path = "/usr/lib/ocf/resource.d/" + ra_split[1] + "/" + ra_split[2]
-    metadata = get_metadata(ra_path)
+    allowable_operations = ["monitor", "start", "stop", "promote", "demote"]
+    default_ops = []
+    try:
+        metadata = lib_ra.get_resource_agent_metadata(cmd_runner(), ra_type)
+        actions = lib_ra.get_agent_actions(metadata)
 
-    if metadata == False:
+        for action in actions:
+            if action["name"] not in allowable_operations:
+                continue
+            op = [action["name"]]
+            for key in action.keys():
+                if key != "name" and (key != "depth" or action[key] != "0"):
+                    op.append("{0}={1}".format(key, action[key]))
+            default_ops.append(op)
+    except (
+        lib_ra.UnsupportedResourceAgent,
+        lib_ra.AgentNotFound,
+        lib_ra.UnableToGetAgentMetadata
+    ):
         return []
+    except LibraryError as e:
+        process_library_reports(e.args)
+
+    return default_ops
 
-    return_list = []
-    try:
-        root = ET.fromstring(metadata)
-        actions = root.findall(str(".//actions/action"))
-        for action in actions:
-            if action.attrib["name"] in allowable_operations:
-                new_operation = []
-                new_operation.append(action.attrib["name"])
-                for attrib in action.attrib:
-                    value = action.attrib[attrib]
-                    if attrib == "name" or (attrib == "depth" and value == "0"):
-                        continue
-                    new_operation.append(attrib + "=" + value)
-                return_list.append(new_operation)
-    except xml.parsers.expat.ExpatError as e:
-        err("Unable to parse xml for '%s': %s" % (ra_type, e))
-    except xml.etree.ElementTree.ParseError as e:
-        err("Unable to parse xml for '%s': %s" % (ra_type, e))
-
-    return return_list
-
-def get_timeout_seconds(timeout, return_unknown=False):
-    if timeout.isdigit():
-        return int(timeout)
-    suffix_multiplier = {
-        "s": 1,
-        "sec": 1,
-        "m": 60,
-        "min": 60,
-        "h": 3600,
-        "hr": 3600,
-    }
-    for suffix, multiplier in suffix_multiplier.items():
-        if timeout.endswith(suffix) and timeout[:-len(suffix)].isdigit():
-            return int(timeout[:-len(suffix)]) * multiplier
-    return timeout if return_unknown else None
 
 def check_pacemaker_supports_resource_wait():
-    output, retval = run(["crm_resource", "-?"])
-    if "--wait" not in output:
+    if not has_resource_wait_support(cmd_runner()):
         err("crm_resource does not support --wait, please upgrade pacemaker")
 
-def validate_wait_get_timeout():
-    check_pacemaker_supports_resource_wait()
-    if usefile:
-        err("Cannot use '-f' together with '--wait'")
+def validate_wait_get_timeout(need_cib_support=True):
+    if need_cib_support:
+        check_pacemaker_supports_resource_wait()
+        if usefile:
+            err("Cannot use '-f' together with '--wait'")
     wait_timeout = pcs_options["--wait"]
     if wait_timeout is None:
         return wait_timeout
@@ -1329,104 +1428,67 @@ def validate_wait_get_timeout():
         )
     return wait_timeout
 
+
+def is_file_abs_path(path):
+    return path == os.path.abspath(path) and os.path.isfile(path)
+
 # Check and see if the specified resource (or stonith) type is present on the
 # file system and properly responds to a meta-data request
 def is_valid_resource(resource, caseInsensitiveCheck=False):
-    if resource.startswith("ocf:"):
-        resource_split = resource.split(":",3)
-        if len(resource_split) != 3:
-            err("ocf resource definition (" + resource + ") does not match the ocf:provider:name pattern")
-        providers = [resource_split[1]]
-        resource = resource_split[2]
-    elif resource.startswith("stonith:"):
-        resource_split = resource.split(":", 2)
-        stonith = resource_split[1]
-        metadata = get_stonith_metadata("/usr/sbin/" + stonith)
-        if metadata != False:
-            return True
-        else:
-            return False
-    elif resource.startswith("nagios:"):
-        # search for nagios script
-        resource_split = resource.split(":", 2)
-        if os.path.isfile("/usr/share/pacemaker/nagios/plugins-metadata/%s.xml" % resource_split[1]):
-            return True
-        else:
-            return False
-    elif resource.startswith("lsb:"):
-        resource_split = resource.split(":",2)
-        lsb_ra = resource_split[1]
-        if os.path.isfile("/etc/init.d/" + lsb_ra):
-            return True
+    try:
+        if resource.startswith("stonith:"):
+            lib_ra.get_fence_agent_metadata(
+                cmd_runner(), resource.split("stonith:", 1)[1]
+            )
         else:
-            return False
+            lib_ra.get_resource_agent_metadata(cmd_runner(), resource)
+        # return True if no exception was raised
+        return True
+    except lib_ra.UnsupportedResourceAgent:
+        pass
+    except LibraryError:
+        # agent not exists or obtaining metadata failed
+        return False
+
+    if resource.startswith("lsb:"):
+        agent = os.path.join("/etc/init.d/", resource.split(":", 1)[1])
+        return is_file_abs_path(agent)
     elif resource.startswith("systemd:"):
-        resource_split = resource.split(":",2)
-        systemd_ra = resource_split[1]
-        if os.path.isfile("/etc/systemd/system/" + systemd_ra + ".service") or os.path.isfile("/usr/lib/systemd/system/" + systemd_ra + ".service"):
-            return True
-        else:
-            return False
-    else:
-        providers = sorted(os.listdir("/usr/lib/ocf/resource.d"))
+        _, agent_name = resource.split(":", 1)
+        agent1 = os.path.join(
+            "/etc/systemd/system/", agent_name + ".service"
+        )
+        agent2 = os.path.join(
+            "/usr/lib/systemd/system/", agent_name + ".service"
+        )
+        return is_file_abs_path(agent1) or is_file_abs_path(agent2)
 
-    # search for ocf script
-    for provider in providers:
-        filepath = "/usr/lib/ocf/resource.d/" + provider + "/"
+    # resource name is not full, maybe it's ocf resource
+    for provider in sorted(os.listdir(settings.ocf_resources)):
+        provider_path = os.path.join(settings.ocf_resources, provider)
         if caseInsensitiveCheck:
-            if os.path.isdir(filepath):
-                all_files = [ f for f in os.listdir(filepath ) ]
-                for f in all_files:
-                    if f.lower() == resource.lower() and os.path.isfile(filepath + f):
-                        return "ocf:" + provider + ":" + f
+            if os.path.isdir(provider_path):
+                for f in os.listdir(provider_path):
+                    if (
+                        f.lower() == resource.lower() and
+                        os.path.isfile(os.path.join(provider_path, f))
+                    ):
+                        return "ocf:{0}:{1}".format(provider, f)
                 continue
 
-        metadata = get_metadata(filepath + resource)
-        if metadata == False:
-            continue
-        else:
-            # found it
-            return True
-
+        if os.path.exists(
+            os.path.join(settings.ocf_resources, provider, resource)
+        ):
+            try:
+                lib_ra.get_resource_agent_metadata(
+                    cmd_runner(),
+                    "ocf:{0}:{1}".format(provider, resource)
+                )
+                return True
+            except LibraryError:
+                continue
     return False
 
-# Get metadata from resource agent
-def get_metadata(resource_agent_script):
-    os.environ['OCF_ROOT'] = "/usr/lib/ocf/"
-    if (not os.path.isfile(resource_agent_script)) or (not os.access(resource_agent_script, os.X_OK)):
-        return False
-
-    (metadata, retval) = run([resource_agent_script, "meta-data"],True)
-    if retval == 0:
-        return metadata
-    else:
-        return False
-
-def get_stonith_metadata(fence_agent_script):
-    if (not os.path.isfile(fence_agent_script)) or (not os.access(fence_agent_script, os.X_OK)):
-        return False
-    (metadata, retval) = run([fence_agent_script, "-o", "metadata"], True)
-    if retval == 0:
-        return metadata
-    else:
-        return False
-
-def get_default_stonith_options():
-    (metadata, retval) = run([settings.stonithd_binary, "metadata"],True)
-    if retval == 0:
-        root = ET.fromstring(metadata)
-        params = root.findall(str(".//parameter"))
-        default_params = []
-        for param in params:
-            adv_param = False
-            for short_desc in param.findall(str(".//shortdesc")):
-                if short_desc.text.startswith("Advanced use only"):
-                    adv_param = True
-            if adv_param == False:
-                default_params.append(param)
-        return default_params
-    else:
-        return []
 
 # Return matches from the CIB with the xpath_query
 def get_cib_xpath(xpath_query):
@@ -1477,9 +1539,15 @@ def is_etree(var):
 # Replace only configuration section of cib with dom passed
 def replace_cib_configuration(dom):
     if is_etree(dom):
-        new_dom = ET.tostring(dom)
-    else:
+        #etree returns string in bytes: b'xml'
+        #python 3 removed .encode() from byte strings
+        #run(...) calls subprocess.Popen.communicate which calls encode...
+        #so there is bytes to str conversion
+        new_dom = ET.tostring(dom).decode()
+    elif hasattr(dom, "toxml"):
         new_dom = dom.toxml()
+    else:
+        new_dom = dom
     output, retval = run(["cibadmin", "--replace", "-o", "configuration", "-V", "--xml-pipe"],False,new_dom)
     if retval != 0:
         err("Unable to update cib\n"+output)
@@ -1491,6 +1559,7 @@ def is_valid_cib_scope(scope):
     ]
 
 # Checks to see if id exists in the xml dom passed
+# DEPRECATED use lxml version available in pcs.lib.cib.tools
 def does_id_exist(dom, check_id):
     if is_etree(dom):
         for elem in dom.findall(str(".//*")):
@@ -1505,6 +1574,7 @@ def does_id_exist(dom, check_id):
 
 # Returns check_id if it doesn't exist in the dom, otherwise it adds an integer
 # to the end of the id and increments it until a unique id is found
+# DEPRECATED use lxml version available in pcs.lib.cib.tools
 def find_unique_id(dom, check_id):
     counter = 1
     temp_id = check_id
@@ -1661,7 +1731,7 @@ def getTerminalSize(fd=1):
 
 def get_terminal_input(message=None):
     if message:
-        sys.stdout.write('Username: ')
+        sys.stdout.write(message)
         sys.stdout.flush()
     if PYTHON2:
         return raw_input("")
@@ -1675,17 +1745,16 @@ def get_terminal_password(message="Password: "):
         return get_terminal_input(message)
 
 # Returns an xml dom containing the current status of the cluster
+# DEPRECATED, please use ClusterState(getClusterStateXml()) instead
 def getClusterState():
-    (output, retval) = run(["crm_mon", "-1", "-X","-r"])
-    if (retval != 0):
-        err("error running crm_mon, is pacemaker running?")
-    dom = parseString(output)
-    return dom
-
-def getNodeAttributes():
-    dom = get_cib_dom()
-    nodes = dom.getElementsByTagName("node")
+    return parseString(getClusterStateXml())
 
+# DEPRECATED, please use lib.pacemaker.get_cluster_status_xml in new code
+def getClusterStateXml():
+    xml, returncode = run(["crm_mon", "--one-shot", "--as-xml", "--inactive"])
+    if returncode != 0:
+        err("error running crm_mon, is pacemaker running?")
+    return xml
 
 # Returns true if stonith-enabled is not false/off & no stonith devices exist
 # So if the cluster can't start due to missing stonith devices return true
@@ -1728,7 +1797,7 @@ def getCorosyncNodesID(allow_failure=False):
 
         (output, retval) = run(['corosync-cmapctl', '-b', 'nodelist.node'])
     else:
-        err_msgs, retval, output, std_err = call_local_pcsd(
+        err_msgs, retval, output, dummy_std_err = call_local_pcsd(
             ['status', 'nodes', 'corosync-id'], True
         )
         if err_msgs:
@@ -1761,7 +1830,7 @@ def getPacemakerNodesID(allow_failure=False):
     if os.getuid() == 0:
         (output, retval) = run(['crm_node', '-l'])
     else:
-        err_msgs, retval, output, std_err = call_local_pcsd(
+        err_msgs, retval, output, dummy_std_err = call_local_pcsd(
             ['status', 'nodes', 'pacemaker-id'], True
         )
         if err_msgs:
@@ -1807,76 +1876,6 @@ def getResourceType(resource):
     resType = resource.getAttribute("type")
     return resClass + ":" + resProvider + ":" + resType
 
-# Returns empty array if all attributes are valid, otherwise return an array
-# of bad attributes
-# res_id is the resource id
-# ra_values is an array of 2 item tuples (key, value)
-# resource is a python minidom element of the resource from the cib
-def validInstanceAttributes(res_id, ra_values, resource_type):
-    ra_values = dict(ra_values)
-    found = False
-    stonithDevice = False
-    resSplit = resource_type.split(":")
-    if len(resSplit) == 2:
-        (resClass, resType) = resSplit
-        metadata = get_stonith_metadata(fence_bin + resType)
-        stonithDevice = True
-    else:
-        (resClass, resProvider, resType) = resource_type.split(":")
-        metadata = get_metadata("/usr/lib/ocf/resource.d/" + resProvider + "/" + resType)
-
-    if metadata == False:
-        err("Unable to get metadata for resource: %s" % resource_type)
-
-    missing_required_parameters = []
-    valid_parameters = ["pcmk_host_list", "pcmk_host_map", "pcmk_host_check", "pcmk_host_argument", "pcmk_arg_map", "pcmk_list_cmd", "pcmk_status_cmd", "pcmk_monitor_cmd"]
-    valid_parameters = valid_parameters + ["stonith-timeout", "priority", "timeout"]
-    valid_parameters = valid_parameters + ["pcmk_reboot_action", "pcmk_poweroff_action", "pcmk_list_action", "pcmk_monitor_action", "pcmk_status_action"]
-    for a in ["off","on","status","list","metadata","monitor", "reboot"]:
-        valid_parameters.append("pcmk_" + a + "_action")
-        valid_parameters.append("pcmk_" + a + "_timeout")
-        valid_parameters.append("pcmk_" + a + "_retries")
-    bad_parameters = []
-    try:
-        actions = ET.fromstring(metadata).find("parameters")
-        for action in actions.findall(str("parameter")):
-            valid_parameters.append(action.attrib["name"])
-            if "required" in action.attrib and action.attrib["required"] == "1":
-# If a default value is set, then the attribute isn't really required (for 'action' on stonith devices only)
-                default_exists = False
-                if action.attrib["name"] == "action" and stonithDevice:
-                    for ch in action:
-                        if ch.tag == "content" and "default" in ch.attrib:
-                            default_exists = True
-                            break
-
-                if not default_exists:
-                    missing_required_parameters.append(action.attrib["name"])
-    except xml.parsers.expat.ExpatError as e:
-        err("Unable to parse xml for '%s': %s" % (resource_type, e))
-    except xml.etree.ElementTree.ParseError as e:
-        err("Unable to parse xml for '%s': %s" % (resource_type, e))
-    for key,value in ra_values.items():
-        if key not in valid_parameters:
-            bad_parameters.append(key)
-        if key in missing_required_parameters:
-            missing_required_parameters.remove(key)
-
-    if missing_required_parameters:
-        if resClass == "stonith" and "port" in missing_required_parameters:
-            # Temporarily make "port" an optional parameter. Once we are
-            # getting metadata from pacemaker, this will be reviewed and fixed.
-            #if (
-            #    "pcmk_host_argument" in ra_values
-            #    or
-            #    "pcmk_host_map" in ra_values
-            #    or
-            #    "pcmk_host_list" in ra_values
-            #):
-            missing_required_parameters.remove("port")
-
-    return bad_parameters, missing_required_parameters
-
 def getClusterName():
     if is_rhel6():
         try:
@@ -1888,7 +1887,7 @@ def getClusterName():
     else:
         try:
             f = open(settings.corosync_conf_file,'r')
-            conf = corosync_conf_utils.parse_string(f.read())
+            conf = corosync_conf_parser.parse_string(f.read())
             f.close()
             # mimic corosync behavior - the last cluster_name found is used
             cluster_name = None
@@ -1897,7 +1896,7 @@ def getClusterName():
                     cluster_name = attrs[1]
             if cluster_name:
                 return cluster_name
-        except (IOError, corosync_conf_utils.CorosyncConfException) as e:
+        except (IOError, corosync_conf_parser.CorosyncConfParserException):
             return ""
 
     return ""
@@ -1927,34 +1926,18 @@ def is_score_or_opt(var):
     return False
 
 def is_score(var):
-    return score_regexp.match(var) is not None
+    return is_score_value(var)
 
 def validate_xml_id(var, description="id"):
-    # see NCName definition
-    # http://www.w3.org/TR/REC-xml-names/#NT-NCName
-    # http://www.w3.org/TR/REC-xml/#NT-Name
-    if len(var) < 1:
-        return False, "%s cannot be empty" % description
-    first_char_re = re.compile("[a-zA-Z_]")
-    if not first_char_re.match(var[0]):
-        return (
-            False,
-            "invalid %s '%s', '%s' is not a valid first character for a %s"
-                % (description, var, var[0], description)
-        )
-    char_re = re.compile("[a-zA-Z0-9_.-]")
-    for char in var[1:]:
-        if not char_re.match(char):
-            return (
-                False,
-                "invalid %s '%s', '%s' is not a valid character for a %s"
-                    % (description, var, char, description)
-            )
+    try:
+        validate_id(var, description)
+    except LibraryError as e:
+        return False, e.args[0].message
     return True, ""
 
 def is_iso8601_date(var):
     # using pacemaker tool to check if a value is a valid pacemaker iso8601 date
-    output, retVal = run(["iso8601", "-d", var])
+    dummy_output, retVal = run(["iso8601", "-d", var])
     return retVal == 0
 
 def verify_cert_key_pair(cert, key):
@@ -1986,14 +1969,6 @@ def verify_cert_key_pair(cert, key):
 
     return errors
 
-# Does pacemaker consider a variable as true in cib?
-# See crm_is_true in pacemaker/lib/common/utils.c
-def is_cib_true(var):
-    return var.lower() in CIB_BOOLEAN_TRUE
-
-def is_cib_boolean(val):
-    return val.lower() in CIB_BOOLEAN_TRUE + CIB_BOOLEAN_FALSE
-
 def is_systemctl():
     systemctl_paths = [
         '/usr/bin/systemctl',
@@ -2007,17 +1982,7 @@ def is_systemctl():
 
 @simple_cache
 def is_rhel6():
-    # Checking corosync version works in most cases and supports non-rhel
-    # distributions as well as running (manually compiled) corosync2 on rhel6.
-    # - corosync2 does not support cman at all
-    # - corosync1 runs with cman on rhel6
-    # - corosync1 can be used without cman, but we don't support it anyways
-    # - corosync2 is the default result if errors occur
-    output, retval = run(["corosync", "-v"])
-    if retval != 0:
-        return False
-    match = re.search(r"version\D+(\d+)", output)
-    return match and match.group(1) == "1"
+    return is_cman_cluster(cmd_runner())
 
 def err(errorText, exit_after_error=True):
     sys.stderr.write("Error: %s\n" % errorText)
@@ -2029,31 +1994,7 @@ def process_library_reports(report_item_list):
     """
     report_item_list list of ReportItem
     """
-    critical_error = False
-    for report_item in report_item_list:
-        if report_item.severity == ReportItemSeverity.WARNING:
-            print("Warning: " + report_item.message)
-            continue
-
-        if report_item.severity != ReportItemSeverity.ERROR:
-            print(report_item.message)
-            continue
-
-        if report_item.forceable and "--force" in pcs_options:
-            # Let the user know what may be wrong even when --force is used,
-            # as it may be used for override early errors hiding later
-            # errors otherwise.
-            print("Warning: " + report_item.message)
-            continue
-
-        sys.stderr.write('Error: {0}{1}\n'.format(
-            report_item.message,
-            ", use --force to override" if report_item.forceable else ''
-        ))
-        critical_error = True
-
-    if critical_error:
-        sys.exit(1)
+    process_lib_reports(report_item_list, "--force" in pcs_options)
 
 def serviceStatus(prefix):
     if not is_systemctl():
@@ -2092,7 +2033,7 @@ def disableServices():
 
 def write_file(path, data, permissions=0o644, binary=False):
     if os.path.exists(path):
-        if not "--force" in pcs_options:
+        if "--force" not in pcs_options:
             return False, "'%s' already exists, use --force to overwrite" % path
         else:
             try:
@@ -2460,7 +2401,7 @@ def is_valid_cib_value(type, value, enum_options=[]):
     if type == "enum":
         return value in enum_options
     elif type == "boolean":
-        return is_cib_boolean(value)
+        return is_boolean(value)
     elif type == "integer":
         return is_score(value)
     elif type == "time":
@@ -2567,14 +2508,105 @@ def get_cluster_property_from_xml(etree_el):
         property["longdesc"] = ""
     return property
 
+# DEPRECATED use lxml version available in pcs.lib.cib.tools
 def get_acls(dom):
     acls = dom.getElementsByTagName("acls")
     if len(acls) == 0:
         acls = dom.createElement("acls")
         conf = dom.getElementsByTagName("configuration")
         if len(conf) == 0:
-            utils.err("Unable to get configuration section of cib")
+            err("Unable to get configuration section of cib")
         conf[0].appendChild(acls)
     else:
         acls = acls[0]
     return acls
+
+def get_lib_env():
+    user = None
+    groups = None
+    if os.geteuid() == 0:
+        for name in ("CIB_user", "CIB_user_groups"):
+            if name in os.environ and os.environ[name].strip():
+                value = os.environ[name].strip()
+                if "CIB_user" == name:
+                    user = value
+                else:
+                    groups = value.split(" ")
+
+    cib_data = None
+    if usefile:
+        cib_data = get_cib()
+
+    corosync_conf_data = None
+    if "--corosync_conf" in pcs_options:
+        conf = pcs_options["--corosync_conf"]
+        try:
+            corosync_conf_data = open(conf).read()
+        except IOError as e:
+            err("Unable to read %s: %s" % (conf, e.strerror))
+
+    return LibraryEnvironment(
+        logging.getLogger("old_cli"),
+        get_report_processor(),
+        user,
+        groups,
+        cib_data,
+        corosync_conf_data,
+        auth_tokens_getter=readTokens,
+    )
+
+def get_cli_env():
+    user = None
+    groups = None
+    if os.geteuid() == 0:
+        for name in ("CIB_user", "CIB_user_groups"):
+            if name in os.environ and os.environ[name].strip():
+                value = os.environ[name].strip()
+                if "CIB_user" == name:
+                    user = value
+                else:
+                    groups = value.split(" ")
+
+    env = Env()
+    env.user = user
+    env.groups = groups
+    env.auth_tokens_getter = readTokens
+    env.debug = "--debug" in pcs_options
+    return env
+
+def get_middleware_factory():
+    return middleware.create_middleware_factory(
+        cib=middleware.cib(usefile, get_cib, replace_cib_configuration),
+        corosync_conf_existing=middleware.corosync_conf_existing(
+            pcs_options.get("--corosync_conf", None)
+        )
+    )
+
+def get_library_wrapper():
+    return Library(get_cli_env(), get_middleware_factory())
+
+
+def get_modificators():
+    #please keep in mind that this is not final implemetation
+    #beside missing support of other possible options, cases may arise that can
+    #not be solved using a dict - for example "wait" - maybe there will be
+    #global default for it and maybe there will appear need for local default...
+    #there is possible create class extending dict, so dict like access in
+    #commands is not an issue
+    return {
+        "full": "--full" in pcs_options,
+        "autocorrect": "--autocorrect" in pcs_options,
+        "force": "--force" in pcs_options,
+        "skip_offline_nodes": "--skip-offline" in pcs_options,
+        "corosync_conf": pcs_options.get("--corosync_conf", None),
+    }
+
+def exit_on_cmdline_input_errror(error, main_name, usage_name):
+    if error.message:
+        err(error.message)
+    else:
+        usage.show(main_name, [usage_name])
+    sys.exit(1)
+
+def get_report_processor():
+    return LibraryReportProcessorToConsole(debug=("--debug" in pcs_options))
diff --git a/pcsd/.gitignore b/pcsd/.gitignore
deleted file mode 100644
index 180bf07..0000000
--- a/pcsd/.gitignore
+++ /dev/null
@@ -1,2 +0,0 @@
-.bundle
-vendor
diff --git a/pcsd/Gemfile b/pcsd/Gemfile
index fb97a1a..e851eaf 100644
--- a/pcsd/Gemfile
+++ b/pcsd/Gemfile
@@ -12,8 +12,6 @@ gem 'tilt'
 gem 'eventmachine'
 gem 'rack-test'
 gem 'backports'
-gem 'sinatra-sugar'
-gem 'monkey-lib'
 gem 'rpam-ruby19', :platform => [:ruby_19, :ruby_20, :ruby_21, :ruby_22]
 gem 'json'
 gem 'multi_json'
diff --git a/pcsd/Gemfile.lock b/pcsd/Gemfile.lock
index 3140d6f..eff055a 100644
--- a/pcsd/Gemfile.lock
+++ b/pcsd/Gemfile.lock
@@ -2,12 +2,10 @@ GEM
   remote: https://rubygems.org/
   remote: https://tojeline.fedorapeople.org/rubygems/
   specs:
-    backports (3.6.4)
-    eventmachine (1.0.7)
+    backports (3.6.8)
+    eventmachine (1.2.0.1)
     json (1.8.3)
-    monkey-lib (0.5.4)
-      backports
-    multi_json (1.11.1)
+    multi_json (1.12.0)
     open4 (1.3.4)
     orderedhash (0.0.6)
     rack (1.6.4)
@@ -16,21 +14,18 @@ GEM
     rack-test (0.6.3)
       rack (>= 1.0)
     rpam-ruby19 (1.2.1)
-    sinatra (1.4.6)
+    sinatra (1.4.7)
       rack (~> 1.4)
       rack-protection (~> 1.4)
       tilt (>= 1.3, < 3)
-    sinatra-contrib (1.4.4)
+    sinatra-contrib (1.4.7)
       backports (>= 2.0)
       multi_json
       rack-protection
       rack-test
       sinatra (~> 1.4.0)
       tilt (>= 1.3, < 3)
-    sinatra-sugar (0.5.1)
-      monkey-lib (~> 0.5.0)
-      sinatra (~> 1.0)
-    tilt (1.4.1)
+    tilt (2.0.3)
 
 PLATFORMS
   ruby
@@ -39,7 +34,6 @@ DEPENDENCIES
   backports
   eventmachine
   json
-  monkey-lib
   multi_json
   open4
   orderedhash
@@ -49,5 +43,4 @@ DEPENDENCIES
   rpam-ruby19
   sinatra
   sinatra-contrib
-  sinatra-sugar
   tilt
diff --git a/pcsd/Makefile b/pcsd/Makefile
index e18d2df..798a8bd 100644
--- a/pcsd/Makefile
+++ b/pcsd/Makefile
@@ -5,7 +5,21 @@ build_gems: get_gems
 # also bundler is not available on RHEL6 in rpm
 build_gems_rhel6:
 	mkdir -p vendor/bundle/ruby
-	gem install --verbose --no-rdoc --no-ri -l -i vendor/bundle/ruby vendor/cache/backports-3.6.4.gem vendor/cache/eventmachine-1.0.7.gem vendor/cache/json-1.8.3.gem vendor/cache/monkey-lib-0.5.4.gem vendor/cache/multi_json-1.11.1.gem vendor/cache/open4-1.3.4.gem vendor/cache/orderedhash-0.0.6.gem vendor/cache/rack-1.6.4.gem vendor/cache/rack-protection-1.5.3.gem vendor/cache/rack-test-0.6.3.gem vendor/cache/rpam-ruby19-feist-1.2.1.1.gem vendor/cache/tilt-1.4.1.gem vendor/cache/sinatra-1.4. [...]
+	gem install --verbose --no-rdoc --no-ri -l -i vendor/bundle/ruby \
+	vendor/cache/backports-3.6.8.gem \
+	vendor/cache/eventmachine-1.2.0.1.gem \
+	vendor/cache/json-1.8.3.gem \
+	vendor/cache/multi_json-1.12.1.gem \
+	vendor/cache/open4-1.3.4.gem \
+	vendor/cache/orderedhash-0.0.6.gem \
+	vendor/cache/rack-1.6.4.gem \
+	vendor/cache/rack-protection-1.5.3.gem \
+	vendor/cache/rack-test-0.6.3.gem \
+	vendor/cache/rpam-ruby19-feist-1.2.1.1.gem \
+	vendor/cache/tilt-2.0.3.gem \
+	vendor/cache/sinatra-1.4.7.gem \
+	vendor/cache/sinatra-contrib-1.4.7.gem \
+	-- '--with-ldflags="-Wl,-z,now -Wl,-z,relro"'
 
 get_gems:
 	bundle package
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
index d2c6c25..6b2cc84 100644
--- a/pcsd/bootstrap.rb
+++ b/pcsd/bootstrap.rb
@@ -43,7 +43,7 @@ def get_pcs_path(pcsd_path)
   end
 end
 
-PCS_VERSION = '0.9.149'
+PCS_VERSION = '0.9.151'
 COROSYNC = COROSYNC_BINARIES + "corosync"
 ISRHEL6 = is_rhel6
 ISSYSTEMCTL = is_systemctl
diff --git a/pcsd/cfgsync.rb b/pcsd/cfgsync.rb
index ca5a21a..ce1aeb0 100644
--- a/pcsd/cfgsync.rb
+++ b/pcsd/cfgsync.rb
@@ -50,16 +50,13 @@ module Cfgsync
 
     def self.from_file(default=nil)
       begin
+        return self.on_file_missing(default) if not File::exist?(@file_path)
         file = nil
         file = File.open(@file_path, File::RDONLY)
         file.flock(File::LOCK_SH)
         return self.from_text(file.read())
       rescue => e
-        $logger.warn(
-          "Cannot read config '#{@name}' from '#{@file_path}': #{e.message}"
-        )
-        return self.from_text(default) if default
-        raise
+        return self.on_file_read_error(e, default)
       ensure
         unless file.nil?
           file.flock(File::LOCK_UN)
@@ -155,6 +152,22 @@ module Cfgsync
 
     protected
 
+    def self.on_file_missing(default)
+      $logger.warn(
+        "Cannot read config '#{@name}' from '#{@file_path}': No such file"
+      )
+      return self.from_text(default) if default
+      raise SystemCallError.new(@file_path, Errno::ENOENT::Errno)
+    end
+
+    def self.on_file_read_error(exception, default)
+      $logger.warn(
+        "Cannot read config '#{@name}' from '#{@file_path}': #{exception.message}"
+      )
+      return self.from_text(default) if default
+      raise exception
+    end
+
     def initialize(text)
       self.text = text
     end
@@ -166,7 +179,7 @@ module Cfgsync
     end
 
     def get_hash()
-      return Digest::SHA1.hexdigest(self.text)
+      return Digest::SHA1.hexdigest(self.text || '')
     end
   end
 
@@ -178,6 +191,17 @@ module Cfgsync
 
     protected
 
+    def self.on_file_missing(default)
+      return self.from_text(nil)
+    end
+
+    def self.on_file_read_error(exception, default)
+      $logger.warn(
+        "Cannot read config '#{@name}' from '#{@file_path}': #{exception.message}"
+      )
+      return self.from_text('')
+    end
+
     def get_version()
       return PCSConfig.new(self.text).data_version
     end
@@ -208,6 +232,17 @@ module Cfgsync
 
     protected
 
+    def self.on_file_missing(default)
+      return self.from_text(nil)
+    end
+
+    def self.on_file_read_error(exception, default)
+      $logger.warn(
+        "Cannot read config '#{@name}' from '#{@file_path}': #{exception.message}"
+      )
+      return self.from_text('')
+    end
+
     def get_version()
       return PCSTokens.new(self.text).data_version
     end
diff --git a/pcsd/config.rb b/pcsd/config.rb
index 011c2bb..c4b4c8a 100644
--- a/pcsd/config.rb
+++ b/pcsd/config.rb
@@ -16,13 +16,55 @@ class PCSConfig
 
     input_clusters = []
     input_permissions = {}
+    default_permissions = [
+      {
+        'type' => Permissions::TYPE_GROUP,
+        'name' => ADMIN_GROUP,
+        'allow' => [
+          Permissions::READ,
+          Permissions::WRITE,
+          Permissions::GRANT,
+        ]
+      },
+    ]
+
+    # set a reasonable default if file doesn't exist
+    # set default permissions for backwards compatibility (there is no way to
+    # differentiante between an old cluster without config and a new cluster
+    # without config)
+    # Since ADMIN_GROUP has access to pacemaker by default anyway, we can safely
+    # allow access in pcsd as well even for new clusters.
+    if cfg_text.nil?
+      @format_version = CURRENT_FORMAT
+      perm_list = []
+      default_permissions.each { |perm|
+        perm_list << Permissions::EntityPermissions.new(
+          perm['type'], perm['name'], perm['allow']
+        )
+      }
+      @permissions_local = Permissions::PermissionsSet.new(perm_list)
+      return
+    end
 
+    # set a reasonable default if got empty text (i.e. file exists but is empty)
+    if cfg_text.strip.empty?
+      @format_version = CURRENT_FORMAT
+      return
+    end
+
+    # main parsing
     begin
       json = JSON.parse(cfg_text)
-      if not(json.is_a?(Hash) and json.key?("format_version"))
+      if json.is_a?(Array)
         @format_version = 1
-      else
+      elsif (
+        json.is_a?(Hash) and
+        json.key?('format_version') and
+        json['format_version'].is_a?(Integer)
+      )
         @format_version = json["format_version"]
+      else
+        raise 'invalid file format'
       end
 
       if @format_version > CURRENT_FORMAT
@@ -43,19 +85,7 @@ class PCSConfig
         # All members of 'haclient' group had unrestricted access.
         # We give them access to most functions except reading tokens and keys,
         # they also won't be able to add and remove nodes because of that.
-        input_permissions = {
-          'local_cluster' => [
-            {
-              'type' => Permissions::TYPE_GROUP,
-              'name' => ADMIN_GROUP,
-              'allow' => [
-                Permissions::READ,
-                Permissions::WRITE,
-                Permissions::GRANT,
-              ]
-            },
-          ],
-        }
+        input_permissions = {'local_cluster' => default_permissions}
         # backward compatibility code end
       else
         $logger.error("Unable to parse pcs_settings file")
@@ -161,6 +191,12 @@ class PCSTokens
     @data_version = 0
     @tokens = {}
 
+    # set a reasonable parseable default if got empty text
+    if cfg_text.nil? or cfg_text.strip.empty?
+      @format_version = CURRENT_FORMAT
+      return
+    end
+
     begin
       json = JSON.parse(cfg_text)
       if not(json.is_a?(Hash) and json.key?('format_version') and json.key?('tokens'))
diff --git a/pcsd/fenceagent.rb b/pcsd/fenceagent.rb
index 8b0bc12..28c5980 100644
--- a/pcsd/fenceagent.rb
+++ b/pcsd/fenceagent.rb
@@ -1,95 +1,15 @@
-def getFenceAgents(auth_user, fence_agent = nil)
+def getFenceAgents()
   fence_agent_list = {}
   agents = Dir.glob('/usr/sbin/fence_' + '*')
   agents.each { |a|
     fa = FenceAgent.new
     fa.name =  a.sub(/.*\//,"")
     next if fa.name == "fence_ack_manual"
-
-    if fence_agent and a.sub(/.*\//,"") == fence_agent.sub(/.*:/,"")
-      required_options, optional_options, advanced_options, info = getFenceAgentMetadata(auth_user, fa.name)
-      fa.required_options = required_options
-      fa.optional_options = optional_options
-      fa.advanced_options = advanced_options
-      fa.info = info
-    end
     fence_agent_list[fa.name] = fa
   }
   fence_agent_list
 end
 
-def getFenceAgentMetadata(auth_user, fenceagentname)
-  options_required = {}
-  options_optional = {}
-  options_advanced = {
-      "priority" => "",
-      "pcmk_host_argument" => "",
-      "pcmk_host_map" => "",
-      "pcmk_host_list" => "",
-      "pcmk_host_check" => ""
-  }
-  for a in ["reboot", "list", "status", "monitor", "off"]
-    options_advanced["pcmk_" + a + "_action"] = ""
-    options_advanced["pcmk_" + a + "_timeout"] = ""
-    options_advanced["pcmk_" + a + "_retries"] = ""
-  end
-
-  # There are bugs in stonith_admin & the new fence_agents interaction
-  # eventually we'll want to switch back to this, but for now we directly
-  # call the agent to get metadata
-  #metadata = `stonith_admin --metadata -a #{fenceagentname}`
-  if not fenceagentname.start_with?('fence_') or fenceagentname.include?('/')
-    $logger.error "Invalid fence agent '#{fenceagentname}'"
-    return [options_required, options_optional, options_advanced]
-  end
-  stdout, stderr, retval = run_cmd(
-    auth_user, "/usr/sbin/#{fenceagentname}", '-o', 'metadata'
-  )
-  metadata = stdout.join
-  begin
-    doc = REXML::Document.new(metadata)
-  rescue REXML::ParseException => e
-    $logger.error(
-      "Unable to parse metadata of fence agent '#{resourcepath}': #{e}"
-    )
-    return [options_required, options_optional, options_advanced]
-  end
-
-  short_desc = ""
-  long_desc = ""
-  if doc.root
-    short_desc = doc.root.attributes["shortdesc"]
-  end
-  if short_desc == ""
-    doc.elements.each('resource-agent/shortdesc') {|sd|
-      short_desc = sd.text ? sd.text.strip : sd.text
-    }
-  end
-  doc.elements.each('resource-agent/longdesc') {|ld|
-    long_desc = ld.text ? ld.text.strip : ld.text
-  }
-
-  doc.elements.each('resource-agent/parameters/parameter') { |param|
-    temp_array = []
-    if param.elements["shortdesc"]
-      temp_array << param.elements["shortdesc"].text
-    else
-      temp_array << ""
-    end
-    if param.elements["longdesc"]
-      temp_array << param.elements["longdesc"].text
-    else
-      temp_array << ""
-    end
-    if param.attributes["required"] == "1" and param.attributes["name"] != "action"
-      options_required[param.attributes["name"]] = temp_array
-    else
-      options_optional[param.attributes["name"]] = temp_array
-    end
-  }
-  [options_required, options_optional, options_advanced, [short_desc, long_desc]]
-end
-
 class FenceAgent
   attr_accessor :name, :resource_class, :required_options, :optional_options, :advanced_options, :info
   def initialize(name=nil, required_options={}, optional_options={}, resource_class=nil, advanced_options={})
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index e441817..85cb95c 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -242,7 +242,7 @@ end
 
 # Gets all of the nodes specified in the pcs config file for the cluster
 def get_cluster_nodes(cluster_name)
-  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
   clusters = pcs_config.clusters
   cluster = nil
   for c in clusters
@@ -420,7 +420,7 @@ def add_node(auth_user, new_nodename, all=false, auto_start=true)
   end
   $logger.info("Adding #{new_nodename} to pcs_settings.conf")
   corosync_nodes = get_corosync_nodes()
-  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
   pcs_config.update_cluster($cluster_name, corosync_nodes)
   sync_config = Cfgsync::PcsdSettings.from_text(pcs_config.text())
   # on version conflict just go on, config will be corrected eventually
@@ -444,7 +444,7 @@ def remove_node(auth_user, new_nodename, all=false)
   end
   $logger.info("Removing #{new_nodename} from pcs_settings.conf")
   corosync_nodes = get_corosync_nodes()
-  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
   pcs_config.update_cluster($cluster_name, corosync_nodes)
   sync_config = Cfgsync::PcsdSettings.from_text(pcs_config.text())
   # on version conflict just go on, config will be corrected eventually
@@ -1003,12 +1003,12 @@ def is_cib_true(var)
 end
 
 def read_tokens()
-  return PCSTokens.new(Cfgsync::PcsdTokens.from_file('').text()).tokens
+  return PCSTokens.new(Cfgsync::PcsdTokens.from_file().text()).tokens
 end
 
 def write_tokens(tokens)
   begin
-    cfg = PCSTokens.new(Cfgsync::PcsdTokens.from_file('').text())
+    cfg = PCSTokens.new(Cfgsync::PcsdTokens.from_file().text())
     cfg.tokens = tokens
     Cfgsync::PcsdTokens.from_text(cfg.text()).save()
   rescue
@@ -1124,7 +1124,7 @@ def pcs_auth(auth_user, nodes, username, password, force=false, local=true)
   }
   if not new_tokens.empty?
     cluster_nodes = get_corosync_nodes()
-    tokens_cfg = Cfgsync::PcsdTokens.from_file('')
+    tokens_cfg = Cfgsync::PcsdTokens.from_file()
     # only tokens used in pcsd-to-pcsd communication can and need to be synced
     # those are accessible only when running under root account
     if Process.uid != 0
@@ -1816,7 +1816,7 @@ def status_v1_to_v2(status)
 end
 
 def allowed_for_local_cluster(auth_user, action)
-  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
   return pcs_config.permissions_local.allows?(
     auth_user[:username], auth_user[:usergroups], action
   )
diff --git a/pcsd/pcsd.conf b/pcsd/pcsd.conf
index 5ac8bc7..0ac5eec 100644
--- a/pcsd/pcsd.conf
+++ b/pcsd/pcsd.conf
@@ -1,9 +1,21 @@
 # pcsd configuration file
+
 # Set PCSD_DEBUG to true for advanced pcsd debugging information
 PCSD_DEBUG=false
 # Set DISABLE_GUI to true to disable GUI frontend in pcsd
-DISABLE_GUI=false
-# Set web UI sesions lifetime
-SESSION_LIFETIME=3600
+PCSD_DISABLE_GUI=false
+# Set web UI sesions lifetime in seconds
+PCSD_SESSION_LIFETIME=3600
+# List of IP addresses pcsd should bind to delimited by ',' character
+#PCSD_BIND_ADDR='::'
+
+# SSL settings
+# set SSL options delimited by ',' character
+# list of valid options can be obtained by running
+# ruby -e 'require "openssl"; puts OpenSSL::SSL.constants.grep /^OP_/'
+#PCSD_SSL_OPTIONS='OP_NO_SSLv2,OP_NO_SSLv3,OP_NO_TLSv1,OP_NO_TLSv1_1'
+# set SSL ciphers
+#PCSD_SSL_CIPHERS='DEFAULT:!RC4:!3DES:@STRENGTH!'
+
 # Do not change
 RACK_ENV=production
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index ea22775..6bf7db6 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -7,6 +7,8 @@ require 'webrick/https'
 require 'openssl'
 require 'logger'
 require 'thread'
+require 'fileutils'
+require 'cgi'
 
 require 'bootstrap.rb'
 require 'resource.rb'
@@ -44,7 +46,7 @@ rescue Errno::ENOENT
   File.open(COOKIE_FILE, 'w', 0700) {|f| f.write(secret)}
 end
 
-session_lifetime = ENV['SESSION_LIFETIME'].to_i()
+session_lifetime = ENV['PCSD_SESSION_LIFETIME'].to_i()
 session_lifetime = 60 * 60 unless session_lifetime > 0
 use SessionPoolLifetime,
   :expire_after => session_lifetime,
@@ -81,14 +83,16 @@ before do
     $session_storage_env = env
   end
 
-  if request.path != '/login' and not request.path == "/logout" and not request.path == '/remote/auth'
+  if request.path != '/login' and not request.path == "/logout" and not request.path == '/remote/auth' and not request.path == '/login-status'
     protected! 
   end
   $cluster_name = get_cluster_name()
 end
 
 configure do
-  DISABLE_GUI = (ENV['DISABLE_GUI'] and ENV['DISABLE_GUI'].downcase == 'true')
+  DISABLE_GUI = (
+    ENV['PCSD_DISABLE_GUI'] and ENV['PCSD_DISABLE_GUI'].downcase == 'true'
+  )
   PCS = get_pcs_path(File.expand_path(File.dirname(__FILE__)))
   logger = File.open("/var/log/pcsd/pcsd.log", "a+", 0600)
   STDOUT.reopen(logger)
@@ -365,7 +369,19 @@ if not DISABLE_GUI
 
   get '/logout' do
     session.destroy
-    redirect '/login'
+    if is_ajax?
+      halt [200, "OK"]
+    else
+      redirect '/login'
+    end
+  end
+
+  get '/login-status' do
+    if PCSAuth.isLoggedIn(session)
+      halt [200, session[:random]]
+    else
+      halt [401, '{"notauthorized":"true"}']
+    end
   end
 
   post '/login' do
@@ -386,8 +402,9 @@ if not DISABLE_GUI
       #      redirect plp
       #    else
       session.delete(:bad_login_name)
+      session[:random] = "#{Time.now.to_i}-#{rand(100)}"
       if is_ajax?
-        halt [200, "OK"]
+        halt [200, session[:random]]
       else
         redirect '/manage'
       end
@@ -403,7 +420,7 @@ if not DISABLE_GUI
   end
 
   post '/manage/existingcluster' do
-    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
     node = params['node-name']
     code, result = send_request_with_token(
       PCSAuth.getSuperuserAuth(), node, 'status'
@@ -421,7 +438,7 @@ if not DISABLE_GUI
       nodes = status["corosync_offline"] + status["corosync_online"]
 
       if status["cluster_name"] == ''
-        return 400, "The node, '#{noname}', does not currently have a cluster
+        return 400, "The node, '#{node}', does not currently have a cluster
  configured.  You must create a cluster using this node before adding it to pcsd."
       end
 
@@ -446,7 +463,7 @@ already been added to pcsd.  You may not add two clusters with the same name int
           return 400, "Unable to get authentication info from cluster '#{status['cluster_name']}'."
         end
 
-        sync_config = Cfgsync::PcsdTokens.from_file('')
+        sync_config = Cfgsync::PcsdTokens.from_file()
         pushed, _ = Cfgsync::save_sync_new_tokens(
           sync_config, new_tokens, get_corosync_nodes(), $cluster_name
         )
@@ -479,7 +496,7 @@ already been added to pcsd.  You may not add two clusters with the same name int
 
     warning_messages = []
 
-    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
     @manage = true
     @cluster_name = params[:clustername]
     @nodes = []
@@ -553,7 +570,7 @@ already been added to pcsd.  You may not add two clusters with the same name int
         # we are waiting for the request to finish, so no locking is needed.
         # If we are in a different cluster we just try twice to update the
         # config, dealing with any updates in between.
-        pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+        pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
         pcs_config.clusters << Cluster.new(@cluster_name, @nodes)
         sync_config = Cfgsync::PcsdSettings.from_text(pcs_config.text())
         pushed, _ = Cfgsync::save_sync_new_version(
@@ -572,7 +589,7 @@ already been added to pcsd.  You may not add two clusters with the same name int
   end
 
   post '/manage/removecluster' do
-    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
     params.each { |k,v|
       if k.start_with?("clusterid-")
         pcs_config.remove_cluster(k.sub("clusterid-",""))
@@ -671,7 +688,7 @@ already been added to pcsd.  You may not add two clusters with the same name int
 
     if not new_tokens.empty?
       cluster_nodes = get_corosync_nodes()
-      tokens_cfg = Cfgsync::PcsdTokens.from_file('')
+      tokens_cfg = Cfgsync::PcsdTokens.from_file()
       sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
         tokens_cfg, new_tokens, cluster_nodes, $cluster_name
       )
@@ -691,7 +708,7 @@ already been added to pcsd.  You may not add two clusters with the same name int
 
   get '/permissions/?' do
     @manage = true
-    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
     @clusters = pcs_config.clusters.sort { |a, b| a.name <=> b.name }
     erb :permissions, :layout => :main
   end
@@ -705,7 +722,7 @@ already been added to pcsd.  You may not add two clusters with the same name int
     @user_types = []
     @users_permissions = []
 
-    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
 
     if not pcs_config.is_cluster_name_in_use(@cluster_name)
       @error = 'Cluster not found'
@@ -741,7 +758,7 @@ already been added to pcsd.  You may not add two clusters with the same name int
   get '/managec/:cluster/main' do
     auth_user = PCSAuth.sessionToAuthUser(session)
     @cluster_name = params[:cluster]
-    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
     @clusters = pcs_config.clusters
     @nodes = get_cluster_nodes(params[:cluster])
     if @nodes == []
@@ -946,6 +963,175 @@ already been added to pcsd.  You may not add two clusters with the same name int
     end
   end
 
+  get '/managec/:cluster/get_resource_agent_metadata' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    cluster = params[:cluster]
+    resource_agent = params[:agent]
+    code, out = send_cluster_request_with_token(
+      auth_user,
+      cluster,
+      'get_resource_agent_metadata',
+      false,
+      {:resource_agent => resource_agent}
+    )
+    if code != 404
+      return [code, out]
+    end
+
+    code, out = send_cluster_request_with_token(
+      auth_user,
+      cluster,
+      'resource_metadata',
+      false,
+      {
+        :resourcename => resource_agent,
+        :new => true
+      }
+    )
+    if code != 200
+      return [400, 'Unable to get meta-data of specified resource agent.']
+    end
+    desc_regex = Regexp.new(
+      '<span class="reg[^>]*>(?<short>[^>]*) </span>[^<]*' +
+        '<span title="(?<long>[^"]*)"'
+    )
+    parameters_regex = Regexp.new(
+      '<input type="hidden" name="resource_type"[^>]*>(?<required>[\s\S]*)' +
+        '<div class="bold">Optional Arguments:</div>(?<optional>[\S\s]*)' +
+        '<tr class="stop">'
+    )
+    parameter_regex = Regexp.new(
+      '<tr title="(?<longdesc>[^"]*)"[^>]*>[\s]*<td class="reg">\s*' +
+        '(?<name>[^<\s]*)\s*</td>\s*<td>\s*' +
+        '<input placeholder="(?<shortdesc>[^"]*)"'
+    )
+
+    desc = desc_regex.match(out)
+    unless desc
+      return [400, 'Unable to get meta-data of specified resource agent.']
+    end
+    result = {
+      :name => resource_agent,
+      :shortdesc => html2plain(desc[:short]),
+      :longdesc => html2plain(desc[:long]),
+      :parameters => []
+    }
+
+    parameters = parameters_regex.match(out)
+    parameters[:required].scan(parameter_regex) { |match|
+      result[:parameters] << {
+        :name => html2plain(match[1]),
+        :longdesc => html2plain(match[0]),
+        :shortdesc => html2plain(match[2]),
+        :type => 'string',
+        :required => true
+      }
+    }
+    parameters[:optional].scan(parameter_regex) { |match|
+      result[:parameters] << {
+        :name => html2plain(match[1]),
+        :longdesc => html2plain(match[0]),
+        :shortdesc => html2plain(match[2]),
+        :type => 'string',
+        :required => false
+      }
+    }
+    return [200, JSON.generate(result)]
+  end
+
+  get '/managec/:cluster/get_fence_agent_metadata' do
+    auth_user = PCSAuth.sessionToAuthUser(session)
+    cluster = params[:cluster]
+    fence_agent = params[:agent]
+    code, out = send_cluster_request_with_token(
+      auth_user,
+      cluster,
+      'get_fence_agent_metadata',
+      false,
+      {:fence_agent => fence_agent}
+    )
+    if code != 404
+      return [code, out]
+    end
+
+    code, out = send_cluster_request_with_token(
+      auth_user,
+      cluster,
+      'fence_device_metadata',
+      false,
+      {
+        :resourcename => fence_agent.sub('stonith:', ''),
+        :new => true
+      }
+    )
+    if code != 200
+      return [400, 'Unable to get meta-data of specified fence agent.']
+    end
+    desc_regex = Regexp.new(
+      '<span class="reg[^>]*>(?<short>[^>]*) </span>[^<]*' +
+        '<span title="(?<long>[^"]*)"'
+    )
+    parameters_regex = Regexp.new(
+      '<input type="hidden" name="resource_type"[^>]*>(?<required>[\s\S]*)' +
+        '<div class="bold">Optional Arguments:</div>(?<optional>[\S\s]*)' +
+        '<div class="bold">Advanced Arguments:</div>(?<advanced>[\S\s]*)' +
+        '<tr class="stop">'
+    )
+    required_parameter_regex = Regexp.new(
+      '<tr title="(?<longdesc>[^"]*)[^>]*>[\s]*' +
+        '<td class="reg">\s* (?<name>[^<\s]*)\s*</td>\s*<td>\s*' +
+        '<input placeholder="(?<shortdesc>[^"]*)"'
+    )
+    other_parameter_regex = Regexp.new(
+      '<td class="reg">\s* (?<name>[^<\s]*)\s*</td>\s*<td>\s*' +
+        '<input placeholder="(?<shortdesc>[^"]*)"'
+    )
+
+    desc = desc_regex.match(out)
+    unless desc
+      return [400, 'Unable to get meta-data of specified fence agent.']
+    end
+    result = {
+      :name => fence_agent,
+      :shortdesc => html2plain(desc[:short]),
+      :longdesc => html2plain(desc[:long]),
+      :parameters => []
+    }
+
+    parameters = parameters_regex.match(out)
+    parameters[:required].scan(required_parameter_regex) { |match|
+      result[:parameters] << {
+        :name => html2plain(match[1]),
+        :longdesc => html2plain(match[0]),
+        :shortdesc => html2plain(match[2]),
+        :type => 'string',
+        :required => true,
+        :advanced => false
+      }
+    }
+    parameters[:optional].scan(other_parameter_regex) { |match|
+      result[:parameters] << {
+        :name => html2plain(match[0]),
+        :longdesc => '',
+        :shortdesc => html2plain(match[1]),
+        :type => 'string',
+        :required => false,
+        :advanced => false
+      }
+    }
+    parameters[:advanced].scan(other_parameter_regex) { |match|
+      result[:parameters] << {
+        :name => html2plain(match[0]),
+        :longdesc => '',
+        :shortdesc => html2plain(match[1]),
+        :type => 'string',
+        :required => false,
+        :advanced => true
+      }
+    }
+    return [200, JSON.generate(result)]
+  end
+
   post '/managec/:cluster/fix_auth_of_cluster' do
     clustername = params[:cluster]
     unless clustername
@@ -1097,6 +1283,10 @@ class Node
   end
 end
 
+def html2plain(text)
+  return CGI.unescapeHTML(text).gsub(/<br[^>]*>/, "\n")
+end
+
 helpers do
   def h(text)
     Rack::Utils.escape_html(text)
diff --git a/pcsd/public/css/style.css b/pcsd/public/css/style.css
index 1c003bd..95535f0 100644
--- a/pcsd/public/css/style.css
+++ b/pcsd/public/css/style.css
@@ -786,3 +786,7 @@ li.menuheader {
 .status-warning {
   color: #ff6600;
 }
+
+table.args-table td.reg {
+  width: 17em;
+}
diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
index 237d7ce..4ff4ebc 100644
--- a/pcsd/public/js/nodes-ember.js
+++ b/pcsd/public/js/nodes-ember.js
@@ -171,16 +171,24 @@ Pcs = Ember.Application.createWithMixins({
 
           Ember.run.scheduleOnce('afterRender', Pcs, function () {
             if (self.get('cur_fence')) {
-              if (fence_change)
+              if (fence_change) {
+                if (first_run) {
+                  update_instance_attributes(self.get('cur_fence').get('id'));
+                }
                 tree_view_onclick(self.get('cur_fence').get('id'), true);
-              else
+              } else {
                 tree_view_select(self.get('cur_fence').get('id'));
+              }
             }
             if (self.get('cur_resource')) {
-              if (resource_change)
+              if (resource_change) {
+                if (first_run) {
+                  update_instance_attributes(self.get('cur_resource').get('id'));
+                }
                 tree_view_onclick(self.get('cur_resource').get('id'), true);
-              else
+              } else {
                 tree_view_select(self.get('cur_resource').get('id'));
+              }
             }
             Pcs.selectedNodeController.reset();
             disable_checkbox_clicks();
@@ -246,6 +254,26 @@ Pcs.ClusterPropertyComponent = Ember.Component.extend({
   ]
 });
 
+Pcs.ParametersTableComponent = Ember.Component.extend({
+  parameters: [],
+  show_content: false,
+  show_title: true,
+  table_name: "",
+  table_id: "",
+  content_style: function() {
+    return ("display: " + (this.get("show_content") ? "block" : "none"));
+  }.property("show_content"),
+  actions: {
+    toggleBody: function() {
+      this.toggleProperty('show_content');
+    }
+  }
+});
+
+Pcs.ParametersTableElementComponent = Ember.Component.extend({
+  tagName: "tr"
+});
+
 Pcs.UtilizationTableComponent = Ember.Component.extend({
   entity: null,
   type: "node", // node or resource
@@ -385,6 +413,8 @@ Pcs.resourcesContainer = Ember.Object.create({
   constraints: {},
   group_list: [],
   data_version: null,
+  new_resource_agent_metadata: null,
+  new_fence_agent_metadata: null,
 
   get_resource_by_id: function(resource_id) {
     var resource_map = this.get('resource_map');
@@ -647,7 +677,6 @@ Pcs.resourcesContainer = Ember.Object.create({
     var constraints = self.get_constraints(data["constraints"]);
     self.set('constraints', constraints);
     var resource_map = self.get('resource_map');
-    update_resource_form_groups($("#new_resource_agent"), self.get('group_list').sort());
     $.each(constraints, function(const_type, cons) {
       $.each(resource_map, function(resource_id, resource_obj) {
         if (resource_id in cons) {
@@ -668,7 +697,18 @@ Pcs.resourcesContainer = Ember.Object.create({
 Pcs.resourcesContainer.reopen({
   is_version_1: function() {
     return (this.get("data_version") == '1');
-  }.property('data_version')
+  }.property('data_version'),
+  groups_enum: function() {
+    var self = this;
+    var res = [];
+    $.each(self.get("group_list"), function(_, group) {
+      res.push({
+        name: group,
+        value: group
+      });
+    });
+    return res;
+  }.property("group_list")
 });
 
 Pcs.ResourceObj = Ember.Object.extend({
@@ -846,6 +886,7 @@ Pcs.ResourceOperationObj = Ember.Object.extend({
 });
 
 Pcs.PrimitiveObj = Pcs.ResourceObj.extend({
+  resource_agent: null,
   agentname: null,
   provider: null,
   type: null,
@@ -854,7 +895,13 @@ Pcs.PrimitiveObj = Pcs.ResourceObj.extend({
   instance_status: [],
   operations: [],
   utilization: [],
-  resource_type: Ember.computed.alias('agentname'),
+  resource_type: function() {
+    var agent = this.get("agentname");
+    if (agent) {
+      return agent.replace("::", ":");
+    }
+    return agent;
+  }.property("agentname"),
   is_primitive: true,
   nodes_running_on: function() {
     var self = this;
@@ -900,6 +947,8 @@ Pcs.PrimitiveObj = Pcs.ResourceObj.extend({
 
 Pcs.GroupObj = Pcs.ResourceObj.extend({
   members: [],
+  //for internal usage
+  _members: [],
   is_group: true,
   children: Ember.computed.alias('members'),
 
@@ -919,20 +968,35 @@ Pcs.GroupObj = Pcs.ResourceObj.extend({
 
   refresh: function() {
     var self = this;
-    var members = self.get("members");
-    var member;
     var new_members = [];
-    $.each(members, function(i,v) {
-      member = Pcs.PrimitiveObj.create(v);
+    var member;
+    var old_members = {};
+    // Property 'members' is filled by the constructor or the update method,
+    // therefore properties 'members' and '_members' may now differ. We need to
+    // update only old members and create new objects for new ones.
+    $.each(self.get("_members"), function(_, m) {
+      old_members[m.get("id")] = m;
+    });
+
+    $.each(self.get("members"), function(_,m) {
+      if (m.id in old_members) {
+        old_members[m.id].update(old_members[m.id], m);
+        member = old_members[m.id];
+      } else {
+        member = Pcs.PrimitiveObj.create(m);
+      }
       member.set('parent', self);
       new_members.push(member);
     });
     self.set("members", new_members);
+    self.set("_members", new_members);
   }
 });
 
 Pcs.MultiInstanceObj = Pcs.ResourceObj.extend({
   member: null,
+  //for internal usage
+  _member: null,
   children: function() {
     return [this.get('member')];
   }.property('member'),
@@ -956,16 +1020,34 @@ Pcs.MultiInstanceObj = Pcs.ResourceObj.extend({
   refresh: function() {
     var self = this;
     var member = self.get("member");
+    var old_member = self.get("_member");
     var new_member = null;
-    switch (member.class_type) {
-      case "primitive":
-        new_member = Pcs.PrimitiveObj.create(member);
-        break;
-      case "group":
-        new_member = Pcs.GroupObj.create(member);
+    // Property 'member' is filled by the constructor or the update method,
+    // therefore properties 'member' and '_member' may now differ. We need to
+    // create a new object only if there is no resource with the same id and
+    // the same type. Otherwise, we update the existing object in place.
+    if (!old_member) {
+      old_member = Pcs.resourcesContainer.get_resource_by_id(member.id);
+    }
+    if (
+      old_member &&
+      member.id == old_member.get("id") &&
+      member.class_type == old_member.get("class_type")
+    ) {
+      old_member.update(old_member, member);
+      new_member = old_member;
+    } else {
+      switch (member.class_type) {
+        case "primitive":
+          new_member = Pcs.PrimitiveObj.create(member);
+          break;
+        case "group":
+          new_member = Pcs.GroupObj.create(member);
+      }
     }
     new_member.set('parent', self);
     self.set("member", new_member);
+    self.set("_member", new_member);
   }
 });
 
@@ -979,6 +1061,99 @@ Pcs.MasterSlaveObj = Pcs.MultiInstanceObj.extend({
   resource_type: 'Master/Slave'
 });
 
+Pcs.ResourceAgentParameter = Ember.Object.extend({
+  name: "",
+  readable_name: Ember.computed.alias("name"),
+  form_name: function() {
+    var name = "_res_param";
+    var val = this.get("value");
+    name += ((!val || val == "") ? "empty_" : "ne_");
+    return name + this.get("name");
+  }.property("name", "value"),
+  type: "string",
+  value: null,
+  cur_val: Ember.computed.oneWay("value"),
+  required: false,
+  advanced: false,
+  longdesc: "",
+  longdesc_html: function() {
+    return nl2br(htmlEncode(this.get("longdesc")));
+  }.property("longdesc"),
+  shortdesc: "",
+  "default": null,
+  description: function() {
+    var shortdesc = nl2br(htmlEncode(this.get("shortdesc")));
+    var longdesc = nl2br(htmlEncode(this.get("longdesc")));
+    if (longdesc == shortdesc) longdesc = "";
+    var def_val = this.get("default");
+    def_val = nl2br(htmlEncode((def_val) ? def_val : ""));
+    var desc = [];
+    if (shortdesc) desc.push(shortdesc);
+    if (longdesc) desc.push(longdesc);
+    if (def_val) desc.push("Default value: " + def_val);
+    return desc.join("<br /><br />");
+  }.property("longdesc", "shortdesc", "default")
+});
+
+Pcs.ResourceAgent = Ember.Object.extend({
+  name: "",
+  longdesc: "",
+  longdesc_html: function() {
+    return nl2br(htmlEncode(this.get("longdesc")));
+  }.property("longdesc"),
+  shortdesc: "",
+  parameters: [],
+  required_parameters: function() {
+    var self = this;
+    var args = [];
+    $.each(self.get("parameters"), function(_, arg) {
+      if (arg.get("required")) {
+        args.pushObject(arg);
+      }
+    });
+    return args;
+  }.property("parameters.@each"),
+  optional_parameters: function() {
+    var self = this;
+    var args = [];
+    $.each(self.get("parameters"), function(_, arg) {
+      if (!arg.get("required") && !arg.get("advanced")) {
+        args.pushObject(arg);
+      }
+    });
+    return args;
+  }.property("parameters.@each"),
+  advanced_parameters: function() {
+    var self = this;
+    var args = [];
+    $.each(self.get("parameters"), function(_, arg) {
+      if (!arg.get("required") && arg.get("advanced")) {
+        args.pushObject(arg);
+      }
+    });
+    return args;
+  }.property("parameters.@each"),
+  get_parameter: function(name) {
+    var self = this;
+    var res = null;
+    $.each(self.get("parameters"), function(_, arg) {
+      if (arg && arg.get("name") == name) {
+        res = arg;
+        return false; // break
+      }
+    });
+    return res;
+  },
+  init: function() {
+    var self = this;
+    var args = [];
+    $.each(self.get("parameters"), function(_, arg) {
+      args.pushObject(Pcs.ResourceAgentParameter.create(arg));
+    });
+    self.set("parameters", Ember.copy(args));
+  }
+});
+
 Pcs.Router.map(function() {
   this.route("Configuration", { path: "configure"});
 
@@ -1112,7 +1287,7 @@ Pcs.Setting = Ember.Object.extend({
   cur_val: Ember.computed.oneWay('value'),
   type: null,
   source: "",
-  default: null,
+  "default": null,
   advanced: false,
   longdesc: "",
   shortdesc: "",
@@ -1133,7 +1308,7 @@ Pcs.Setting = Ember.Object.extend({
   is_enum: function() {
     return (this.get("type") == "enum");
   }.property("type"),
-  enum: [],
+  "enum": [],
   enum_show: function() {
     var self = this;
     var out = [];
@@ -1747,6 +1922,7 @@ Pcs.settingsController = Ember.Controller.create({
   properties: [],
   filtered: [],
   show_advanced: false,
+  error: false,
   filter: "",
   update: function(properties_definition) {
     var self = this;
@@ -1780,6 +1956,7 @@ Pcs.settingsController = Ember.Controller.create({
         return a.get('name').localeCompare(b.get('name'));
       }
     }));
+    self.set("error", false);
   }
 });
 
diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
index 25cc1ac..254f390 100644
--- a/pcsd/public/js/pcsd.js
+++ b/pcsd/public/js/pcsd.js
@@ -234,20 +234,25 @@ function create_node(form) {
 // If update is set to true we update the resource instead of create it
 // if stonith is set to true we update/create a stonith agent
 function create_resource(form, update, stonith) {
-  dataString = $(form).serialize();
-  var resourceID = $(form).find("[name='name']").val(); 
-  url = get_cluster_remote_url() + $(form).attr("action");
+  var data = {};
+  $($(form).serializeArray()).each(function(index, obj) {
+    data[obj.name] = obj.value;
+  });
+  data["resource_type"] = data["resource_type"].replace("::", ":");
+  var url = get_cluster_remote_url() + $(form).attr("action");
   var name;
 
-  if (stonith)
+  if (stonith) {
     name = "fence device";
-  else
-    name = "resource"
+    data["resource_type"] = data["resource_type"].replace("stonith:", "");
+  } else {
+    name = "resource";
+  }
 
   ajax_wrapper({
     type: "POST",
     url: url,
-    data: dataString,
+    data: data,
     dataType: "json",
     success: function(returnValue) {
       $('input.apply_changes').show();
@@ -291,37 +296,15 @@ function disable_spaces(item) {
   });
 }
 
-function load_resource_form(item, ra, stonith) {
-  var data = { new: true, resourcename: ra};
-  var command;
-  if (!stonith)
-    command = "resource_metadata";
-  else
-    command = "fence_device_metadata";
-  
-  item.load(get_cluster_remote_url() + command, data);
-}
-
-function update_resource_form_groups(form, group_list) {
-  var select = $(form).find("select[name='resource_group']").first();
-  if (select.length < 1) {
+function load_resource_form(agent_name, stonith) {
+  stonith = typeof stonith !== 'undefined' ? stonith : false;
+  if (!agent_name) {
     return;
   }
-  var selected = select.val();
-  var selected_valid = false;
-  var select_new = select.clone();
-  select_new.empty();
-  select_new.append('<option value="">None</options>');
-  $.each(group_list, function(index, group) {
-    select_new.append('<option value="' + group + '">' + group + '</options>');
-    if (selected == group) {
-      selected_valid = true;
-    }
-  });
-  if (selected_valid) {
-    select_new.val(selected);
-  }
-  select.replaceWith(select_new);
+  var prop_name = "new_" + (stonith ? "fence" : "resource") + "_agent_metadata";
+  get_resource_agent_metadata(agent_name, function (data) {
+      Pcs.resourcesContainer.set(prop_name, Pcs.ResourceAgent.create(data));
+  }, stonith);
 }
 
 function verify_remove(remove_func, forceable, checklist_id, dialog_id, label, ok_text, title, remove_id) {
@@ -1184,36 +1167,6 @@ function load_row(node_row, ac, cur_elem, containing_elem, also_set, initial_loa
   });
 }
 
-function load_agent_form(resource_id, stonith) {
-  var url;
-  var form;
-  if (stonith) {
-    form = $("#stonith_agent_form");
-    url = '/managec/' + Pcs.cluster_name + '/fence_device_form';
-  } else {
-    form = $("#resource_agent_form");
-    url = '/managec/' + Pcs.cluster_name + '/resource_form?version=2';
-  }
-
-  form.empty();
-
-  var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
-  if (!resource_obj || !resource_obj.get('is_primitive'))
-    return;
-
-  var data = {resource: resource_id};
-
-  ajax_wrapper({
-    type: 'GET',
-    url: url,
-    data: data,
-    timeout: pcs_timeout,
-    success: function (data) {
-      Ember.run.next(function(){form.html(data);});
-    }
-  });
-}
-
 function show_loading_screen() {
   $("#loading_screen_progress_bar").progressbar({ value: 100});
   $("#loading_screen").dialog({
@@ -1841,11 +1794,7 @@ function refresh_cluster_properties() {
       Pcs.settingsController.update(data);
     },
     error: function (xhr, status, error) {
-      alert(
-        "Unable to get cluster properties: "
-        + ajax_simple_error(xhr, status, error)
-      );
-      Pcs.settingsController.update({});
+      Pcs.settingsController.set("error", true);
     },
     complete: function() {
       hide_loading_screen();
@@ -2072,6 +2021,40 @@ function auto_show_hide_constraints() {
   });
 }
 
+function get_resource_agent_metadata(agent, on_success, stonith) {
+  stonith = typeof stonith !== 'undefined' ? stonith : false;
+  var request = (stonith)
+    ? 'get_fence_agent_metadata'
+    : 'get_resource_agent_metadata';
+  ajax_wrapper({
+    url: get_cluster_remote_url() + request,
+    dataType: "json",
+    data: {agent: agent},
+    timeout: pcs_timeout,
+    success: on_success,
+    error: function (xhr, status, error) {
+      alert(
+        "Unable to get metadata for resource agent '" + agent + "' "
+        + ajax_simple_error(xhr, status, error)
+      );
+    }
+  })
+}
+
+function update_instance_attributes(resource_id) {
+  var res_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
+  if (!(res_obj && res_obj.get("is_primitive"))) {
+    return;
+  }
+  get_resource_agent_metadata(res_obj.get("resource_type"), function(data) {
+    var agent = Pcs.ResourceAgent.create(data);
+    res_obj.set("resource_agent", agent);
+    $.each(res_obj.get("instance_attr"), function(_, attr) {
+      agent.get_parameter(attr.name).set("value", attr.value);
+    });
+  }, res_obj.get("stonith"));
+}
+
 function tree_view_onclick(resource_id, auto) {
   auto = typeof auto !== 'undefined' ? auto : false;
   var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
@@ -2081,18 +2064,21 @@ function tree_view_onclick(resource_id, auto) {
   }
   if (resource_obj.get('stonith')) {
     Pcs.resourcesContainer.set('cur_fence', resource_obj);
-    if (!auto) window.location.hash = "/fencedevices/" + resource_id;
+    if (!auto) {
+      window.location.hash = "/fencedevices/" + resource_id;
+      update_instance_attributes(resource_id);
+    }
   } else {
     Pcs.resourcesContainer.set('cur_resource', resource_obj);
-    if (!auto) window.location.hash = "/resources/" + resource_id;
+
+    if (!auto) {
+      window.location.hash = "/resources/" + resource_id;
+      update_instance_attributes(resource_id);
+    }
     auto_show_hide_constraints();
   }
 
   tree_view_select(resource_id);
-
-  Ember.run.next(Pcs, function() {
-    load_agent_form(resource_id, resource_obj.get('stonith'));
-  });
 }
 
 function tree_view_select(element_id) {
@@ -2735,3 +2721,7 @@ Ember.Handlebars.helper('selector-helper', function (content, value, place_holde
   });
   return new Handlebars.SafeString(out);
 });
+
+function nl2br(text) {
+  return text.replace(/(?:\r\n|\r|\n)/g, '<br />');
+}
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index abf2683..443c6ed 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -65,15 +65,12 @@ def remote(params, request, auth_user)
       :get_cluster_properties_definition => method(:get_cluster_properties_definition)
   }
   remote_cmd_with_pacemaker = {
+      :pacemaker_node_status => method(:remote_pacemaker_node_status),
       :resource_start => method(:resource_start),
       :resource_stop => method(:resource_stop),
       :resource_cleanup => method(:resource_cleanup),
-      :resource_form => method(:resource_form),
-      :fence_device_form => method(:fence_device_form),
       :update_resource => method(:update_resource),
       :update_fence_device => method(:update_fence_device),
-      :resource_metadata => method(:resource_metadata),
-      :fence_device_metadata => method(:fence_device_metadata),
       :get_avail_resource_agents => method(:get_avail_resource_agents),
       :get_avail_fence_agents => method(:get_avail_fence_agents),
       :remove_resource => method(:remove_resource),
@@ -97,7 +94,9 @@ def remote(params, request, auth_user)
       :resource_unclone => method(:resource_unclone),
       :resource_ungroup => method(:resource_ungroup),
       :set_resource_utilization => method(:set_resource_utilization),
-      :set_node_utilization => method(:set_node_utilization)
+      :set_node_utilization => method(:set_node_utilization),
+      :get_resource_agent_metadata => method(:get_resource_agent_metadata),
+      :get_fence_agent_metadata => method(:get_fence_agent_metadata)
   }
 
   command = params[:command].to_sym
@@ -133,7 +132,7 @@ def cluster_status_gui(auth_user, cluster_name, dont_update_config=false)
   new_cluster_nodes.uniq!
 
   if new_cluster_nodes.length > 0
-    config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
     if !(dont_update_config or config.cluster_nodes_equal?(cluster_name, new_cluster_nodes))
       old_cluster_nodes = config.get_nodes(cluster_name)
       $logger.info("Updating node list for: #{cluster_name} #{old_cluster_nodes}->#{new_cluster_nodes}")
@@ -189,9 +188,13 @@ def cluster_start(params, request, auth_user)
       return 403, 'Permission denied'
     end
     $logger.info "Starting Daemons"
-    output =  `#{PCS} cluster start`
+    output, stderr, retval = run_cmd(auth_user, PCS, 'cluster', 'start')
     $logger.debug output
-    return output
+    if retval != 0
+      return [400, (output + stderr).join]
+    else
+      return output
+    end
   end
 end
 
@@ -310,7 +313,7 @@ def node_standby(params, request, auth_user)
       return 403, 'Permission denied'
     end
     $logger.info "Standby Node"
-    stdout, stderr, retval = run_cmd(auth_user, PCS, "cluster", "standby")
+    stdout, stderr, retval = run_cmd(auth_user, PCS, "node", "standby")
     return stdout
   end
 end
@@ -326,7 +329,7 @@ def node_unstandby(params, request, auth_user)
       return 403, 'Permission denied'
     end
     $logger.info "Unstandby Node"
-    stdout, stderr, retval = run_cmd(auth_user, PCS, "cluster", "unstandby")
+    stdout, stderr, retval = run_cmd(auth_user, PCS, "node", "unstandby")
     return stdout
   end
 end
@@ -624,7 +627,7 @@ def get_permissions_remote(params, request, auth_user)
     return 403, 'Permission denied'
   end
 
-  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
   data = {
     'user_types' => Permissions::get_user_types(),
     'permission_types' => Permissions::get_permission_types(),
@@ -690,7 +693,7 @@ def set_permissions_remote(params, request, auth_user)
   perm_set = Permissions::PermissionsSet.new(perm_list)
 
   full_users_old = Set.new
-  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
   pcs_config.permissions_local.entity_permissions_list.each{ |entity_perm|
     if entity_perm.allow_list.include?(Permissions::FULL)
       full_users_old << [entity_perm.type, entity_perm.name]
@@ -715,7 +718,7 @@ def set_permissions_remote(params, request, auth_user)
   end
 
   2.times {
-    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
     pcs_config.permissions_local = perm_set
     sync_config = Cfgsync::PcsdSettings.from_text(pcs_config.text())
     pushed, _ = Cfgsync::save_sync_new_version(
@@ -807,7 +810,7 @@ def remote_remove_nodes(params, request, auth_user)
     retval, output = remove_node(auth_user, node, true)
     out = out + output.join("\n")
   }
-  config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
   if config.get_nodes($cluster_name) == nil or config.get_nodes($cluster_name).length == 0
     return [200,"No More Nodes"]
   end
@@ -904,6 +907,18 @@ def create_cluster(params, request, auth_user)
   end
 end
 
+def remote_pacemaker_node_status(params, request, auth_user)
+  if not allowed_for_local_cluster(auth_user, Permissions::READ)
+    return 403, 'Permission denied'
+  end
+  output, stderr, retval = run_cmd(auth_user, PCS, 'node', 'pacemaker-status')
+  if retval != 0
+    return [400, stderr]
+  else
+    return output
+  end
+end
+
 def node_status(params, request, auth_user)
   if params[:node] and params[:node] != '' and params[:node] !=
     $cur_node_name and !params[:redirected]
@@ -1058,7 +1073,7 @@ def status_all(params, request, auth_user, nodes=[], dont_update_config=false)
 
   node_list.uniq!
   if node_list.length > 0
-    config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
     old_node_list = config.get_nodes(params[:cluster])
     if !(dont_update_config or config.cluster_nodes_equal?(params[:cluster], node_list))
       $logger.info("Updating node list for: #{params[:cluster]} #{old_node_list}->#{node_list}")
@@ -1080,7 +1095,7 @@ def clusters_overview(params, request, auth_user)
   cluster_map = {}
   forbidden_clusters = {}
   threads = []
-  config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
   config.clusters.each { |cluster|
     threads << Thread.new {
       cluster_map[cluster.name] = {
@@ -1212,7 +1227,7 @@ def clusters_overview(params, request, auth_user)
 
   # update clusters in PCSConfig
   not_current_data = false
-  config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  config = PCSConfig.new(Cfgsync::PcsdSettings.from_file().text())
   cluster_map.each { |cluster, values|
     next if forbidden_clusters[cluster]
     nodes = []
@@ -1344,56 +1359,6 @@ def resource_start(params, request, auth_user)
   end
 end
 
-def resource_form(params, request, auth_user)
-  if not allowed_for_local_cluster(auth_user, Permissions::READ)
-    return 403, 'Permission denied'
-  end
-
-  cib_dom = get_cib_dom(auth_user)
-  @cur_resource = get_resource_by_id(params[:resource], cib_dom)
-  @groups = get_resource_groups(cib_dom)
-  @version = params[:version]
-
-  if @cur_resource.instance_of?(ClusterEntity::Primitive) and !@cur_resource.stonith
-    @cur_resource_group = @cur_resource.get_group
-    @cur_resource_clone = @cur_resource.get_clone
-    @cur_resource_ms = @cur_resource.get_master
-    @resource = ResourceAgent.new(@cur_resource.agentname)
-    if @cur_resource.provider == 'heartbeat'
-      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, HEARTBEAT_AGENTS_DIR + @cur_resource.type)
-    elsif @cur_resource.provider == 'pacemaker'
-      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, PACEMAKER_AGENTS_DIR + @cur_resource.type)
-    elsif @cur_resource._class == 'nagios'
-      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, NAGIOS_METADATA_DIR + @cur_resource.type + '.xml')
-    end
-    @existing_resource = true
-    if @resource
-      erb :resourceagentform
-    else
-      "Can't find resource"
-    end
-  else
-    "Resource #{params[:resource]} doesn't exist"
-  end
-end
-
-def fence_device_form(params, request, auth_user)
-  if not allowed_for_local_cluster(auth_user, Permissions::READ)
-    return 403, 'Permission denied'
-  end
-
-  @cur_resource = get_resource_by_id(params[:resource], get_cib_dom(auth_user))
-
-  if @cur_resource.instance_of?(ClusterEntity::Primitive) and @cur_resource.stonith
-    @resource_agents = getFenceAgents(auth_user, @cur_resource.agentname)
-    @existing_resource = true
-    @fenceagent = @resource_agents[@cur_resource.type]
-    erb :fenceagentform
-  else
-    "Can't find fence device"
-  end
-end
-
 # Creates resource if params[:resource_id] is not set
 def update_resource (params, request, auth_user)
   if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
@@ -1521,44 +1486,10 @@ def get_avail_fence_agents(params, request, auth_user)
   if not allowed_for_local_cluster(auth_user, Permissions::READ)
     return 403, 'Permission denied'
   end
-  agents = getFenceAgents(auth_user)
+  agents = getFenceAgents()
   return JSON.generate(agents)
 end
 
-def resource_metadata(params, request, auth_user)
-  if not allowed_for_local_cluster(auth_user, Permissions::READ)
-    return 403, 'Permission denied'
-  end
-  return 200 if not params[:resourcename] or params[:resourcename] == ""
-  resource_name = params[:resourcename][params[:resourcename].rindex(':')+1..-1]
-  class_provider = params[:resourcename][0,params[:resourcename].rindex(':')]
-
-  @resource = ResourceAgent.new(params[:resourcename])
-  if class_provider == "ocf:heartbeat"
-    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, HEARTBEAT_AGENTS_DIR + resource_name)
-  elsif class_provider == "ocf:pacemaker"
-    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, PACEMAKER_AGENTS_DIR + resource_name)
-  elsif class_provider == 'nagios'
-    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(auth_user, NAGIOS_METADATA_DIR + resource_name + '.xml')
-  end
-  @new_resource = params[:new]
-  @resources, @groups = getResourcesGroups(auth_user)
-
-  erb :resourceagentform
-end
-
-def fence_device_metadata(params, request, auth_user)
-  if not allowed_for_local_cluster(auth_user, Permissions::READ)
-    return 403, 'Permission denied'
-  end
-  return 200 if not params[:resourcename] or params[:resourcename] == ""
-  @fenceagent = FenceAgent.new(params[:resourcename])
-  @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(auth_user, params[:resourcename])
-  @new_fenceagent = params[:new]
-  
-  erb :fenceagentform
-end
-
 def remove_resource(params, request, auth_user)
   if not allowed_for_local_cluster(auth_user, Permissions::WRITE)
     return 403, 'Permission denied'
@@ -1993,7 +1924,7 @@ def save_tokens(params, request, auth_user)
     end
   }
 
-  tokens_cfg = Cfgsync::PcsdTokens.from_file('')
+  tokens_cfg = Cfgsync::PcsdTokens.from_file()
   sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
     tokens_cfg, new_tokens, get_corosync_nodes(), $cluster_name
   )
@@ -2180,3 +2111,46 @@ def get_cluster_properties_definition(params, request, auth_user)
   end
   return [400, '{}']
 end
+
+def get_resource_agent_metadata(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::READ)
+    return 403, 'Permission denied'
+  end
+  agent = params[:resource_agent]
+  unless agent
+    return [400, 'Parameter "resource_agent" required.']
+  end
+  stdout, stderr, retval = run_cmd(
+    auth_user, PCS, 'resource', 'get_resource_agent_info', agent
+  )
+  if retval != 0
+    if stderr.join('').include?('is not supported')
+      return [200, JSON.generate({
+        :name => agent,
+        :longdesc => '',
+        :shortdesc => '',
+        :parameters => []
+      })]
+    else
+      return [400, stderr.join("\n")]
+    end
+  end
+  return [200, stdout.join("\n")]
+end
+
+def get_fence_agent_metadata(params, request, auth_user)
+  unless allowed_for_local_cluster(auth_user, Permissions::READ)
+    return 403, 'Permission denied'
+  end
+  agent = params[:fence_agent]
+  unless agent
+    return [400, 'Parameter "fence_agent" required.']
+  end
+  stdout, stderr, retval = run_cmd(
+    auth_user, PCS, 'stonith', 'get_fence_agent_info', agent
+  )
+  if retval != 0
+    return [400, stderr.join("\n")]
+  end
+  return [200, stdout.join("\n")]
+end
diff --git a/pcsd/resource.rb b/pcsd/resource.rb
index 3a079c8..8821f42 100644
--- a/pcsd/resource.rb
+++ b/pcsd/resource.rb
@@ -167,83 +167,6 @@ def getAllConstraints(constraints_dom)
   return constraints
 end
 
-def getResourceMetadata(auth_user, resourcepath)
-  options_required = {}
-  options_optional = {}
-  long_desc = ""
-  short_desc = ""
-
-  resourcepath = Pathname.new(resourcepath).cleanpath.to_s
-  resource_dirs = [
-    HEARTBEAT_AGENTS_DIR, PACEMAKER_AGENTS_DIR, NAGIOS_METADATA_DIR,
-  ]
-  if not resource_dirs.any? { |allowed| resourcepath.start_with?(allowed) }
-    $logger.error(
-      "Unable to get metadata of resource agent '#{resourcepath}': " +
-      'path not allowed'
-    )
-    return [options_required, options_optional, [short_desc, long_desc]]
-  end
-
-  if resourcepath.end_with?('.xml')
-    begin
-      metadata = IO.read(resourcepath)
-    rescue
-      metadata = ""
-    end
-  else
-    ENV['OCF_ROOT'] = OCF_ROOT
-    stdout, stderr, retval = run_cmd(auth_user, resourcepath, 'meta-data')
-    metadata = stdout.join
-  end
-
-  begin
-    doc = REXML::Document.new(metadata)
-  rescue REXML::ParseException => e
-    $logger.error(
-      "Unable to parse metadata of resource agent '#{resourcepath}': #{e}"
-    )
-    return [options_required, options_optional, [short_desc, long_desc]]
-  end
-
-  doc.elements.each('resource-agent/longdesc') {|ld|
-    long_desc = ld.text ? ld.text.strip : ld.text
-  }
-  doc.elements.each('resource-agent/shortdesc') {|ld|
-    short_desc = ld.text ? ld.text.strip : ld.text
-  }
-
-  doc.elements.each('resource-agent/parameters/parameter') { |param|
-    temp_array = []
-    if param.attributes["required"] == "1"
-      if param.elements["shortdesc"] and param.elements["shortdesc"].text
-        temp_array << param.elements["shortdesc"].text.strip
-      else
-        temp_array << ""
-      end
-      if param.elements["longdesc"] and param.elements["longdesc"].text
-        temp_array << param.elements["longdesc"].text.strip
-      else
-        temp_array << ""
-      end
-      options_required[param.attributes["name"]] = temp_array
-    else
-      if param.elements["shortdesc"] and param.elements["shortdesc"].text
-        temp_array << param.elements["shortdesc"].text.strip
-      else
-        temp_array << ""
-      end
-      if param.elements["longdesc"] and param.elements["longdesc"].text
-        temp_array << param.elements["longdesc"].text.strip
-      else
-        temp_array << ""
-      end
-      options_optional[param.attributes["name"]] = temp_array
-    end
-  }
-  [options_required, options_optional, [short_desc, long_desc]]
-end
-
 def getResourceAgents(auth_user)
   resource_agent_list = {}
   stdout, stderr, retval = run_cmd(
diff --git a/pcsd/session.rb b/pcsd/session.rb
index c202632..c54a493 100644
--- a/pcsd/session.rb
+++ b/pcsd/session.rb
@@ -53,9 +53,11 @@ class SessionPoolLifetime < Rack::Session::Pool
     return unless lifetime = @default_options[:expire_after]
     with_lock(env) {
       threshold = Time.now() - lifetime
-      @pool_timestamp.select { |sid, timestamp|
-        timestamp < threshold
-      }.keys.each { |sid|
+      sid_to_delete = []
+      @pool_timestamp.each { |sid, timestamp|
+        sid_to_delete << sid if timestamp < threshold
+      }
+      sid_to_delete.each { |sid|
         delete_session(sid)
       }
     }
diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb
index 2858574..f56c947 100644
--- a/pcsd/ssl.rb
+++ b/pcsd/ssl.rb
@@ -26,6 +26,54 @@ def generate_cert_key_pair(server_name)
   return crt, key
 end
 
+def get_ssl_options()
+  default_options = (
+    OpenSSL::SSL::OP_NO_SSLv2 | OpenSSL::SSL::OP_NO_SSLv3 |
+    OpenSSL::SSL::OP_NO_TLSv1 | OpenSSL::SSL::OP_NO_TLSv1_1
+  )
+  if ENV['PCSD_SSL_OPTIONS']
+    options = 0
+    ENV['PCSD_SSL_OPTIONS'].split(',').each { |op|
+      op_cleaned = op.strip()
+      begin
+        if not op_cleaned.start_with?('OP_')
+          raise NameError.new('options must start with OP_')
+        end
+        op_constant = OpenSSL::SSL.const_get(op_cleaned)
+        options |= op_constant
+      rescue NameError => e
+        $logger.error(
+          "SSL configuration error '#{e}', unknown SSL option '#{op}'"
+        )
+        exit
+      rescue => e
+        $logger.error("SSL configuration error '#{e}'")
+        exit
+      end
+    }
+    return options
+  end
+  return default_options
+end
+
+def run_server(server, webrick_options, secondary_addrs)
+  primary_addr = webrick_options[:BindAddress]
+  port = webrick_options[:Port]
+
+  ciphers = 'DEFAULT:!RC4:!3DES:@STRENGTH!'
+  ciphers = ENV['PCSD_SSL_CIPHERS'] if ENV['PCSD_SSL_CIPHERS']
+  # no need to validate ciphers, ssl context will validate them for us
+
+  $logger.info("Listening on #{primary_addr} port #{port}")
+  server.run(Sinatra::Application, webrick_options) { |server_instance|
+    server_instance.ssl_context.ciphers = ciphers
+    secondary_addrs.each { |addr|
+      $logger.info("Adding listener on #{addr} port #{port}")
+      server_instance.listen(addr, port)
+    }
+  }
+end
+
 if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE)
   crt, key = generate_cert_key_pair(server_name)
   File.open(CRT_FILE, 'w',0700) {|f| f.write(crt)}
@@ -46,16 +94,28 @@ else
   end
 end
 
+default_bind = true
+primary_addr = '::'
+secondary_addrs = []
+if ENV['PCSD_BIND_ADDR']
+  user_addrs = ENV['PCSD_BIND_ADDR'].split(',').collect { |x| x.strip() }
+  if not user_addrs.empty?
+    default_bind = false
+    primary_addr = user_addrs.shift()
+    secondary_addrs = user_addrs
+  end
+end
+
 webrick_options = {
   :Port               => 2224,
-  :BindAddress        => '::',
-  :Host               => '::',
+  :BindAddress        => primary_addr,
+  :Host               => primary_addr,
   :SSLEnable          => true,
   :SSLVerifyClient    => OpenSSL::SSL::VERIFY_NONE,
   :SSLCertificate     => OpenSSL::X509::Certificate.new(crt),
   :SSLPrivateKey      => OpenSSL::PKey::RSA.new(key),
   :SSLCertName        => [[ "CN", server_name ]],
-  :SSLOptions         => OpenSSL::SSL::OP_NO_SSLv2 | OpenSSL::SSL::OP_NO_SSLv3,
+  :SSLOptions         => get_ssl_options(),
 }
 
 server = ::Rack::Handler::WEBrick
@@ -79,9 +139,14 @@ end
 
 require 'pcsd'
 begin
-  server.run(Sinatra::Application, webrick_options)
+  run_server(server, webrick_options, secondary_addrs)
 rescue Errno::EAFNOSUPPORT
-  webrick_options[:BindAddress] = '0.0.0.0'
-  webrick_options[:Host] = '0.0.0.0'
-  server.run(Sinatra::Application, webrick_options)
+  if default_bind
+    primary_addr = '0.0.0.0'
+    webrick_options[:BindAddress] = primary_addr
+    webrick_options[:Host] = primary_addr
+    run_server(server, webrick_options, secondary_addrs)
+  else
+    raise
+  end
 end
diff --git a/pcsd/test/test_cfgsync.rb b/pcsd/test/test_cfgsync.rb
index 2be42da..152b522 100644
--- a/pcsd/test/test_cfgsync.rb
+++ b/pcsd/test/test_cfgsync.rb
@@ -152,8 +152,8 @@ end
 
 
 class TestPcsdSettings < Test::Unit::TestCase
-  def setup()
-    FileUtils.cp(File.join(CURRENT_DIR, "pcs_settings.conf"), CFG_PCSD_SETTINGS)
+  def teardown()
+    FileUtils.rm(CFG_PCSD_SETTINGS, {:force => true})
   end
 
   def test_basics()
@@ -206,16 +206,23 @@ class TestPcsdSettings < Test::Unit::TestCase
   end
 
   def test_file()
+    FileUtils.cp(File.join(CURRENT_DIR, "pcs_settings.conf"), CFG_PCSD_SETTINGS)
     cfg = Cfgsync::PcsdSettings.from_file()
     assert_equal(9, cfg.version)
     assert_equal("ac032803c5190d735cd94a702d42c5c6358013b8", cfg.hash)
   end
+
+  def test_file_missing()
+    cfg = Cfgsync::PcsdSettings.from_file()
+    assert_equal(0, cfg.version)
+    assert_equal('da39a3ee5e6b4b0d3255bfef95601890afd80709', cfg.hash)
+  end
 end
 
 
 class TestPcsdTokens < Test::Unit::TestCase
-  def setup()
-    FileUtils.cp(File.join(CURRENT_DIR, 'tokens'), CFG_PCSD_TOKENS)
+  def teardown()
+    FileUtils.rm(CFG_PCSD_TOKENS, {:force => true})
   end
 
   def test_basics()
@@ -253,10 +260,17 @@ class TestPcsdTokens < Test::Unit::TestCase
   end
 
   def test_file()
+    FileUtils.cp(File.join(CURRENT_DIR, 'tokens'), CFG_PCSD_TOKENS)
     cfg = Cfgsync::PcsdTokens.from_file()
     assert_equal(9, cfg.version)
     assert_equal('571afb6abc603f527462818e7dfe278a8a1f64a7', cfg.hash)
   end
+
+  def test_file_missing()
+    cfg = Cfgsync::PcsdTokens.from_file()
+    assert_equal(0, cfg.version)
+    assert_equal('da39a3ee5e6b4b0d3255bfef95601890afd80709', cfg.hash)
+  end
 end
 
 
diff --git a/pcsd/test/test_config.rb b/pcsd/test/test_config.rb
index 6405a05..26ffaf9 100644
--- a/pcsd/test/test_config.rb
+++ b/pcsd/test/test_config.rb
@@ -11,18 +11,32 @@ class TestConfig < Test::Unit::TestCase
     FileUtils.cp(File.join(CURRENT_DIR, 'pcs_settings.conf'), CFG_PCSD_SETTINGS)
   end
 
-  def test_parse_empty()
-    text = ''
-    cfg = PCSConfig.new(text)
-    assert_equal(0, cfg.clusters.length)
-    assert_equal(
-      [[
-        "error",
-        "Unable to parse pcs_settings file: A JSON text must at least contain two octets!"
-      ]],
-      $logger.log
-    )
-    assert_equal(
+  def fixture_nil_config()
+    return (
+'{
+  "format_version": 2,
+  "data_version": 0,
+  "clusters": [
+
+  ],
+  "permissions": {
+    "local_cluster": [
+      {
+        "type": "group",
+        "name": "haclient",
+        "allow": [
+          "grant",
+          "read",
+          "write"
+        ]
+      }
+    ]
+  }
+}')
+  end
+
+  def fixture_empty_config()
+    return (
 '{
   "format_version": 2,
   "data_version": 0,
@@ -34,12 +48,91 @@ class TestConfig < Test::Unit::TestCase
 
     ]
   }
-}',
-      cfg.text
+}')
+  end
+
+  def test_parse_nil()
+    text = nil
+    cfg = PCSConfig.new(text)
+    assert_equal(0, cfg.clusters.length)
+    assert_equal([], $logger.log)
+    assert_equal(fixture_nil_config, cfg.text)
+  end
+
+  def test_parse_empty()
+    text = ''
+    cfg = PCSConfig.new(text)
+    assert_equal(0, cfg.clusters.length)
+    assert_equal([], $logger.log)
+    assert_equal(fixture_empty_config, cfg.text)
+  end
+
+  def test_parse_whitespace()
+    text = "  \n  "
+    cfg = PCSConfig.new(text)
+    assert_equal(0, cfg.clusters.length)
+    assert_equal([], $logger.log)
+    assert_equal(fixture_empty_config, cfg.text)
+  end
+
+  def test_parse_hash_empty()
+    text = '{}'
+    cfg = PCSConfig.new(text)
+    assert_equal(
+      [['error', 'Unable to parse pcs_settings file: invalid file format']],
+      $logger.log
     )
+    assert_equal(fixture_empty_config, cfg.text)
   end
 
-  def test_parse_format1()
+  def test_parse_hash_no_version()
+    text =
+'{
+  "data_version": 9,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1",
+        "rh71-node2"
+      ]
+    }
+  ]
+}'
+    cfg = PCSConfig.new(text)
+    assert_equal(
+      [['error', 'Unable to parse pcs_settings file: invalid file format']],
+      $logger.log
+    )
+    assert_equal(fixture_empty_config, cfg.text)
+  end
+
+  def test_parse_malformed()
+    text =
+'{
+  "data_version": 9,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1"
+        "rh71-node2"
+      ]
+    }
+  ]
+}'
+    cfg = PCSConfig.new(text)
+    assert_equal(
+      [[
+        'error',
+        "Unable to parse pcs_settings file: 399: unexpected token at '\"rh71-node2\"\n      ]\n    }\n  ]\n}'"
+      ]],
+      $logger.log
+    )
+    assert_equal(fixture_empty_config, cfg.text)
+  end
+
+  def test_parse_format1_empty()
     text = '[]'
     cfg = PCSConfig.new(text)
     assert_equal(0, cfg.clusters.length)
@@ -66,7 +159,9 @@ class TestConfig < Test::Unit::TestCase
 }',
       cfg.text
     )
+  end
 
+  def test_parse_format1_one_cluster()
     text = '
 [
   {
@@ -113,7 +208,7 @@ class TestConfig < Test::Unit::TestCase
     )
   end
 
-  def test_parse_format2()
+  def test_parse_format2_empty()
     text = '
 {
   "format_version": 2
@@ -123,22 +218,10 @@ class TestConfig < Test::Unit::TestCase
     assert_equal(2, cfg.format_version)
     assert_equal(0, cfg.data_version)
     assert_equal(0, cfg.clusters.length)
-    assert_equal(
-'{
-  "format_version": 2,
-  "data_version": 0,
-  "clusters": [
-
-  ],
-  "permissions": {
-    "local_cluster": [
-
-    ]
-  }
-}',
-      cfg.text
-    )
+    assert_equal(fixture_empty_config, cfg.text)
+  end
 
+  def test_parse_format2_one_cluster()
     text =
 '{
   "format_version": 2,
@@ -165,7 +248,9 @@ class TestConfig < Test::Unit::TestCase
     assert_equal("cluster71", cfg.clusters[0].name)
     assert_equal(["rh71-node1", "rh71-node2"], cfg.clusters[0].nodes)
     assert_equal(text, cfg.text)
+  end
 
+  def test_parse_format2_two_clusters()
     text =
 '{
   "format_version": 2,
@@ -585,28 +670,41 @@ class TestTokens < Test::Unit::TestCase
     FileUtils.cp(File.join(CURRENT_DIR, 'tokens'), CFG_PCSD_TOKENS)
   end
 
-  def test_parse_empty()
-    text = ''
-    cfg = PCSTokens.new(text)
-    assert_equal(0, cfg.tokens.length)
-    assert_equal(
-      [[
-        "error",
-        "Unable to parse tokens file: A JSON text must at least contain two octets!"
-      ]],
-      $logger.log
-    )
-    assert_equal(
+  def fixture_empty_config()
+    return(
 '{
   "format_version": 2,
   "data_version": 0,
   "tokens": {
   }
-}',
-      cfg.text
+}'
     )
   end
 
+  def test_parse_nil()
+    text = nil
+    cfg = PCSTokens.new(text)
+    assert_equal(0, cfg.tokens.length)
+    assert_equal([], $logger.log)
+    assert_equal(fixture_empty_config(), cfg.text)
+  end
+
+  def test_parse_empty()
+    text = ''
+    cfg = PCSTokens.new(text)
+    assert_equal(0, cfg.tokens.length)
+    assert_equal([], $logger.log)
+    assert_equal(fixture_empty_config(), cfg.text)
+  end
+
+  def test_parse_whitespace()
+    text = "  \n  "
+    cfg = PCSTokens.new(text)
+    assert_equal(0, cfg.tokens.length)
+    assert_equal([], $logger.log)
+    assert_equal(fixture_empty_config(), cfg.text)
+  end
+
   def test_parse_format1()
     text = '{}'
     cfg = PCSTokens.new(text)
diff --git a/pcsd/views/_configure.erb b/pcsd/views/_configure.erb
index 421f384..fc4e7bd 100644
--- a/pcsd/views/_configure.erb
+++ b/pcsd/views/_configure.erb
@@ -14,28 +14,32 @@
 </tr>
 <tr id="configure_list_row" {{bind-attr style="Pcs.configure_page"}}>
   <td id="config" colspan=3>
-    {{input
-        type="text"
-        value=Pcs.settingsController.filter
-        placeholder="Filter"
-    }}
-    {{#if Pcs.settingsController.show_advanced}}
-      <button onclick="Pcs.settingsController.set('show_advanced', false);">Hide advanced settings</button>
+    {{#if Pcs.settingsController.error}}
+      Unable to get cluster properties.<br/>
     {{else}}
-      <button onclick="Pcs.settingsController.set('show_advanced', true);">Show advanced settings</button>
-    {{/if}}
-    <form id="cluster_properties">
-      <table>
-        {{#each property in Pcs.settingsController.filtered}}
-          {{cluster-property prop=property name=property.name}}
-        {{else}}
-        <tr><td>No cluster properties available.</td></tr>
-        {{/each}}
-      </table>
-      {{#if Pcs.settingsController.filtered}}
-        <button onclick="update_cluster_settings(); return false;">
-          Apply Changes
-        </button>
+      {{input
+          type="text"
+          value=Pcs.settingsController.filter
+          placeholder="Filter"
+      }}
+      {{#if Pcs.settingsController.show_advanced}}
+        <button onclick="Pcs.settingsController.set('show_advanced', false);">Hide advanced settings</button>
+      {{else}}
+        <button onclick="Pcs.settingsController.set('show_advanced', true);">Show advanced settings</button>
+      {{/if}}
+      <form id="cluster_properties">
+        <table>
+          {{#each property in Pcs.settingsController.filtered}}
+            {{cluster-property prop=property name=property.name}}
+          {{else}}
+          <tr><td>No cluster properties available.</td></tr>
+          {{/each}}
+        </table>
+        {{#if Pcs.settingsController.filtered}}
+          <button onclick="update_cluster_settings(); return false;">
+            Apply Changes
+          </button>
+        {{/if}}
       {{/if}}
       <button onclick="show_loading_screen(); refresh_cluster_properties(); return false;">
         Refresh
diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb
index 7e4cf39..4e2311c 100644
--- a/pcsd/views/_resource.erb
+++ b/pcsd/views/_resource.erb
@@ -51,8 +51,10 @@
 	  <td class="bold">Class/Provider</td>
 	  <td>
 	    <%
+        resources = @resource_agents.keys
+
 	      class_providers = []
-	      @resource_agents.keys.each {|k|
+        resources.each {|k|
 		class_providers << k[0,k.rindex(':')]
 	      }
 	      class_providers.uniq!
@@ -69,36 +71,34 @@
 	  <td class="bold">Type</td>
 	  <td>
 	    <select id="all_ra_types" style="display:none;">
-	      <% @resource_agents.keys.sort_by{|a|a.downcase}.each { |key| %>
+	      <% resources.sort_by{|a|a.downcase}.each { |key| %>
 		<option width=250px  value="<%=key%>"><%=key[(key.rindex(':')+1)..-1]%></option>
 	      <% } %>
 	    </select>
-	    <select id="add_ra_type" onchange="load_resource_form($('#new_resource_agent'), this.value);">
-	      <% @resource_agents.keys.sort_by{|a|a.downcase}.each { |key| %>
-		<option width=250px <%= key == "ocf::heartbeat:IPaddr2" ? "selected" : "" %> value="<%=key%>"><%=key%></option>
+	    <select id="add_ra_type" onchange="load_resource_form(this.value);">
+	      <% resources.sort_by{|a|a.downcase}.each { |key| %>
+		<option width=250px <%= key == "ocf:heartbeat:IPaddr2" ? "selected" : "" %> value="<%=key%>"><%=key%></option>
 	      <% } %>
 	    </select>
 	  </td>
 	</tr>
       </table>
-      <div id="new_resource_agent">
-      </div>
+      {{resource-form agent=Pcs.resourcesContainer.new_resource_agent_metadata groups=Pcs.resourcesContainer.groups_enum}}
     </div>
     <div id="add_stonith" style="display: none;">
       <table id="stonith_selector" style="clear:left;float:left;margin-top:25px;">
 	<tr>
 	  <td class="bold">Type</td>
 	  <td>
-	    <select id="add_stonith_type" onchange="load_resource_form($('#new_stonith_agent'),this.value,true);">
+	    <select id="add_stonith_type" onchange="load_resource_form(this.value,true);">
 	      <% @stonith_agents.keys.sort_by{|a|a.downcase}.each { |name| %>
-		<option width=250px <%= @stonith_agents[name]["type"] == "fence_apc" ? "selected" : "" %> value="<%=@stonith_agents[name]["type"]%>"><%=@stonith_agents[name]["type"]%></option>
+		<option width=250px <%= @stonith_agents[name]["type"] == "fence_apc" ? "selected" : "" %> value="stonith:<%=@stonith_agents[name]["type"]%>"><%=@stonith_agents[name]["type"]%></option>
 	    <% } %>
 	    </select>
 	  </td>
 	</tr>
       </table>
-      <div id="new_stonith_agent">
-      </div>
+        {{fence-form agent=Pcs.resourcesContainer.new_fence_agent_metadata}}
     </div>
     <div id="add_group" style="display: none;">
       <form method=POST onkeypress="if (event.keyCode == 13) {$(this).parent().parent().find('.ui-dialog-buttonpane button:eq(1)').trigger('click');return false;} " action="/resource_group_add">
diff --git a/pcsd/views/fenceagentform.erb b/pcsd/views/fenceagentform.erb
deleted file mode 100644
index f54a8de..0000000
--- a/pcsd/views/fenceagentform.erb
+++ /dev/null
@@ -1,78 +0,0 @@
-<% if @fenceagent %>
-<div id="resource_agent_<%=@fenceagent.name%>">
-  <form method=POST action="/update_fence_device">
-    <table style="clear:left; float:left; margin-top: 25px;">
-      <tr>
-        <td><div class="bold">Description:</div></td>
-        <td><span class="reg" style="float:left;"><%=h(@fenceagent.short_desc)%> </span> <span title="<%=nl2br(h(@fenceagent.long_desc))%>" onclick="$(this).closest('table').find('.long_desc_div').toggle();" class="infoicon sprites" style="margin-top:2px;"></span></td>
-      </tr>
-      <tr>
-        <td></td>
-        <td><div class="long_desc_div reg" style="display:none; font-size:12px; max-width:350px;"><%= nl2br(h(@fenceagent.long_desc))%></div></td>
-      </tr>
-      <% if @new_fenceagent %>
-	<tr>
-	  <td class="reg">
-	    Fence Instance Name
-	  </td>
-	  <td>
-	    <input style="margin-right: 50px;" type="text" name="name" size="35" class="text_field">
-	  </td>
-	</tr>
-      <% end %>
-
-      <% if @existing_resource %>
-	<input type="hidden" name="resource_id" value="<%=@cur_resource.id%>">
-      <% end %>
-
-      <input type="hidden" name="resource_type" value="<%=@fenceagent.name%>">
-      <% @fenceagent.required_options.each { |name, desc|  %>
-	<tr title="<%=h(desc[1])%>">
-	  <td class="reg">
-	     <%= name %>
-	  </td>
-	  <td>
-	    <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.instance_attr[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=h(@cur_resource.instance_attr[name].value) if (@existing_resource and @cur_resource and @cur_resource.instance_attr[name])%>" size="35" class="text_field">
-	  </td>
-	</tr>
-      <% } %>
-      <tr>
-	<td id="<%= @new_fenceagent ? "optional_fenceargs_new" : "optional_fenceargs_existing" %>" class="reg" onclick="show_hide_constraints(this)" colspan=2>
-	  <span class="rightarrow sprites"></span><span class="downarrow sprites" style="display: none;"></span>
-	  <div class="bold">Optional Arguments:</div>
-	</td>
-      </tr>
-      <% @fenceagent.optional_options.each { |name, desc|  %>
-	<tr style="display: none;">
-	  <td class="reg">
-	     <%= name %>
-	  </td>
-	  <td>
-            <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.instance_attr[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=h(@cur_resource.instance_attr[name].value) if @existing_resource and @cur_resource and @cur_resource.instance_attr[name]%>" size="35" class="text_field">
-	  </td>
-	</tr>
-      <% } %>
-      <tr class="stop">
-    <td id="<%= @new_fenceagent ? "advanced_fenceargs_new" : "advanced_fenceargs_existing" %>" class="reg" onclick="show_hide_constraints(this)" colspan=2>
-      <span class="rightarrow sprites"></span><span class="downarrow sprites" style="display: none;"></span>
-      <div class="bold">Advanced Arguments:</div>
-    </td>
-      </tr>
-      <% @fenceagent.advanced_options.each { |name, desc|  %>
-    <tr style="display: none;">
-      <td class="reg">
-         <%= name %>
-      </td>
-      <td>
-            <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.instance_attr[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=h(@cur_resource.instance_attr[name].value) if @existing_resource and @cur_resource and @cur_resource.instance_attr[name]%>" size="35" class="text_field">
-      </td>
-    </tr>
-      <% } %>
-      <tr class="stop"><td class="center" style="padding-top:20px;" colspan=2>
-	  <input type=submit class="text_field apply_changes" onclick="$(this).hide();create_resource($(this).parents('form'),<%=@existing_resource ? 'true':'false'%>,true); return false;" value="<%= @existing_resource ? "Apply Changes" : "Create Fence Instance" %>">
-	</td>
-      </tr>
-    </table>
-  </form>
-</div>
-<% end %>
diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
index ae3c478..55b02c2 100644
--- a/pcsd/views/main.erb
+++ b/pcsd/views/main.erb
@@ -193,6 +193,25 @@
           <td class="bold" nowrap>Type:</td>
           <td class="reg" nowrap>{{resource.res_type}}</td>
         </tr>
+        {{#if resource.is_primitive}}
+          <tr>
+            <td>
+              <div class="bold">Description:</div>
+            </td>
+            <td>
+              <span class="reg" style="float:left;">
+                {{resource.resource_agent.shortdesc}}
+              </span>
+              <span {{bind-attr title=resource.resource_agent.longdesc_html}} onclick="$(this).closest('table').find('.long_desc_div').toggle();" class="infoicon sprites" style="margin-top:2px;"></span>
+            </td>
+          </tr>
+          <tr>
+            <td></td>
+            <td>
+              <div class="long_desc_div reg" style="display:none; font-size:12px; max-width:350px;">{{{resource.resource_agent.longdesc_html}}}</div>
+            </td>
+          </tr>
+        {{/if}}
         {{#unless resource.stonith}}
         {{#if resource.is_primitive}}
           <tr>
@@ -280,10 +299,14 @@
       {{/unless}}
     </div>
     {{#if stonith}}
-      <div style="clear:left; margin-top: 2em;" id="stonith_agent_form"></div>
+      <div style="clear:left; margin-top: 2em;" id="stonith_agent_form">
+        {{fence-form resource=resource agent=resource.resource_agent}}
+      </div>
     {{else}}
     {{#if resource.is_primitive}}
-      <div style="clear:left; margin-top: 2em;" id="resource_agent_form"></div>
+      <div style="clear:left; margin-top: 2em;" id="resource_agent_form">
+        {{resource-form resource=resource agent=resource.resource_agent}}
+      </div>
     {{/if}}
     {{/if}}
   {{else}}
@@ -552,6 +575,222 @@ Use the 'Add' button to submit the form.">
     </table>
   </script>
 
+  <script type="text/x-handlebars" data-template-name="components/parameters-table-element">
+    <td class="reg">
+      <span style="float: left;">
+        {{param.name}}
+      </span>
+      {{#if param.description}}
+        <span style="margin-left: 0.5em" class="infoicon sprites" {{bind-attr title=param.description}}></span>
+      {{/if}}
+    </td>
+    <td>
+      {{input
+          size="30"
+          type="text"
+          name=param.form_name
+          value=param.cur_val
+          placeholder=param.default
+      }}
+    </td>
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/parameters-table">
+  {{#if parameters}}
+    <table style="clear:left; margin-top: 1em;" class="args-table">
+      {{#if show_title}}
+        <tr>
+          <td {{action toggleBody}} {{bind-attr id=table_id}} colspan="2" nowrap>
+            {{#if show_content}}
+              <span class="downarrow sprites"></span>
+            {{else}}
+              <span class="rightarrow sprites"></span>
+            {{/if}}
+            <span class="bold">{{table_name}}</span>
+          </td>
+        </tr>
+      {{/if}}
+      <tbody {{bind-attr style=content_style}}>
+        {{#each parameter in parameters}}
+          {{parameters-table-element param=parameter}}
+        {{/each}}
+      </tbody>
+    </table>
+  {{/if}}
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/resource-form">
+    <div style="clear:left; margin-top: 1em;">
+      <form method="post" action="/update_resource">
+        {{#if resource}}
+          <input type="hidden" name="resource_id" {{bind-attr value=resource.id}} />
+        {{/if}}
+        <input type="hidden" name="resource_type" {{bind-attr value=agent.name}} />
+        {{#unless resource}}
+          <div id="resource_group">
+            <table class="args-table">
+              <tbody>
+                <tr>
+                  <td>
+                    <div class="bold">Description:</div>
+                  </td>
+                  <td>
+                    <span class="reg" style="float:left;">
+                      {{agent.shortdesc}}
+                    </span>
+                    <span {{bind-attr title=agent.longdesc_html}} onclick="$(this).closest('table').find('.long_desc_div').toggle();" class="infoicon sprites" style="margin-top:2px;"></span>
+                  </td>
+                </tr>
+                <tr>
+                  <td></td>
+                  <td>
+                    <div class="long_desc_div reg" style="display:none; font-size:12px; max-width:350px;">{{{agent.longdesc_html}}}</div>
+                  </td>
+                </tr>
+                <tr title="Select a group to add the resource to.">
+                  <td nowrap>
+                    <div class="bold">
+                      Resource Group:
+                    </div>
+                  </td>
+                  <td>
+                    {{value-selector
+                        prompt="None"
+                        content=groups
+                        name="resource_group"
+                    }}
+                  </td>
+                </tr>
+                <tr title='Makes the resource run multiple times on the cluster. By default the resource will run once on each of the nodes.'>
+                  <td nowrap>
+                    <div class="bold">
+                      Clone:
+                    </div>
+                  </td>
+                  <td>
+                    <input type="hidden" name="_orig_resource_clone" value="">
+                    <input type=checkbox name="resource_clone">
+                  </td>
+                </tr>
+                <tr title='Makes the resource run multiple times on the cluster and distinguish between Master and Slave operating mode for each instance. By default the resource will run on one node in Master mode and on all other nodes in Slave mode.'>
+                  <td nowrap>
+                    <div class="bold">
+                      Master/Slave:
+                    </div>
+                  </td>
+                  <td>
+                    <input type="hidden" name="_orig_resource_ms" value="">
+                    <input type=checkbox name="resource_ms" >
+                  </td>
+                </tr>
+                <tr title="Do not start the resource automatically after creating.">
+                  <td nowrap>
+                    <div class="bold"
+                      >Disabled:
+                    </div>
+                  </td>
+                  <td>
+                    <input type=checkbox name="disabled">
+                  </td>
+                </tr>
+                <tr>
+                  <td class="reg">
+                    Resource ID
+                  </td>
+                  <td>
+                    <input style="margin-right: 50px;" type="text" name="name" size="30" class="text_field">
+                  </td>
+                </tr>
+              </tbody>
+            </table>
+          </div>
+        {{/unless}}
+        {{parameters-table
+            table_name="Required Arguments"
+            parameters=agent.required_parameters
+            show_content=true
+        }}
+        {{parameters-table
+            table_name="Optional Arguments"
+            parameters=agent.optional_parameters
+        }}
+        {{#if resource}}
+          <input class="apply_changes" type="button" onclick="$(this).hide();create_resource($(this)
+          .parents('form'),true); return false;" value="Apply Changes" style="margin-top: 1em;" />
+          <input type="button" onclick="tree_view_onclick(curResource()); return false;"  value="Refresh" />
+        {{else}}
+          <center>
+            <input class="apply_changes" type="button" onclick="$(this).hide();create_resource($(this)
+            .parents('form'),false); return false;" value="Create Resource" style="margin-top: 1em;" />
+          </center>
+        {{/if}}
+      </form>
+    </div>
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/fence-form">
+    <div style="clear:left; margin-top: 2em;">
+      <form method="post" action="/update_fence_device">
+        {{#if resource}}
+          <input type="hidden" name="resource_id" {{bind-attr value=resource.id}} />
+        {{/if}}
+        <input type="hidden" name="resource_type" {{bind-attr value=agent.name}} />
+        {{#unless resource}}
+          <table class="args-table">
+            <tbody>
+              <tr>
+                <td>
+                  <div class="bold">Description:</div>
+                </td>
+                <td>
+                  <span class="reg" style="float:left;">
+                    {{agent.shortdesc}}
+                  </span>
+                  <span {{bind-attr title=agent.longdesc_html}} onclick="$(this).closest('table').find('.long_desc_div').toggle();" class="infoicon sprites" style="margin-top:2px;"></span>
+                </td>
+              </tr>
+              <tr>
+                <td></td>
+                <td>
+                  <div class="long_desc_div reg" style="display:none; font-size:12px; max-width:350px;">{{{agent.longdesc_html}}}</div>
+                </td>
+              </tr>
+              <tr>
+                <td class="reg">
+                  Fence Instance Name
+                </td>
+                <td>
+                  <input style="margin-right: 50px;" type="text" name="name" size="30" class="text_field">
+                </td>
+              </tr>
+            </tbody>
+          </table>
+        {{/unless}}
+        {{parameters-table
+            table_name="Required Arguments"
+            parameters=agent.required_parameters
+            show_content=true
+        }}
+        {{parameters-table
+            table_name="Optional Arguments"
+            parameters=agent.optional_parameters
+        }}
+        {{parameters-table
+            table_name="Advanced Arguments"
+            parameters=agent.advanced_parameters
+        }}
+        {{#if resource}}
+          <input class="apply_changes" type="button" onclick="$(this).hide();create_resource($(this).parents('form'), true, true); return false;" value="Apply Changes" style="margin-top: 1em;" />
+          <input type="button" onclick="tree_view_onclick(curStonith()); return false;" value="Refresh" />
+        {{else}}
+          <center>
+            <input class="apply_changes" type="button" type="submit" onclick="$(this).hide();create_resource($(this).parents('form'), false, true); return false;" value="Create Fence Instance" style="margin-top: 1em;" />
+          </center>
+        {{/if}}
+      </form>
+    </div>
+  </script>
+
   <script type="text/x-handlebars" data-template-name="components/value-selector">
     {{selector-helper content value prompt}}
   </script>
diff --git a/pcsd/views/resourceagentform.erb b/pcsd/views/resourceagentform.erb
deleted file mode 100644
index 039023e..0000000
--- a/pcsd/views/resourceagentform.erb
+++ /dev/null
@@ -1,104 +0,0 @@
-<div id="resource_agent_<%=@resource.name%>" style="float:left;">
-  <form method=POST action="/update_resource">
-    <div id="resource_group">
-      <table>
-        <tr>
-          <td><div class="bold">Description:</div></td>
-	  <td><span class="reg" style="float:left;"><%=h(@resource.short_desc)%> </span> <span title="<%=nl2br(h(@resource.long_desc))%>" onclick="$(this).closest('table').find('.long_desc_div').toggle();" class="infoicon sprites" style="margin-top:2px;"></span></td>
-        </tr>
-        <tr>
-          <td></td>
-          <td><div class="long_desc_div reg" style="display:none; font-size:12px; max-width:350px;"><%= nl2br(h(@resource.long_desc))%></div></td>
-        </tr>
-	<% if @version != '2' %>
-        <tr title="<%= h('Select a group to add the resource to.') %>">
-	  <td nowrap><div class="bold">Resource Group:</div></td>
-	  <td>
-	    <select name="resource_group">
-		<option value="">None</option>
-	      <% @groups.each do |g| %>
-		<option <%= "selected" if g == @cur_resource_group %> value="<%=g%>"><%=g%></option>
-	      <% end %>
-	    </select>
-	    <input type=hidden name="_orig_resource_group" value="<%= @cur_resource_group if @cur_resource_group %>">
-	  </td>
-	</tr>
-	<tr title='<%= h("Makes the resource run multiple times on the cluster. \
-By default the resource will run once on each of the nodes.") %>'>
-	  <td nowrap><div class="bold">Clone:</div></td>
-	  <td>
-	    <input type="hidden" name="_orig_resource_clone" value="<%= @cur_resource_clone if @cur_resource %>">
-	    <input type=checkbox name="resource_clone" <%= "checked" if @cur_resource && @cur_resource_clone %>>
-	  </td>
-	</tr>
-	<tr title='<%= h("Makes the resource run multiple times on the cluster and \
-distinguish between Master and Slave operating mode for each instance. \
-By default the resource will run on one node in Master mode and on all other \
-nodes in Slave mode.") %>'>
-	  <td nowrap><div class="bold">Master/Slave:</div></td>
-	  <td>
-	    <input type="hidden" name="_orig_resource_ms" value="<%= @cur_resource_ms if @cur_resource %>">
-	    <input type=checkbox name="resource_ms" <%= "checked" if @cur_resource && @cur_resource_ms %>>
-	  </td>
-	</tr>
-	<% end %>
-	<% if not @existing_resource %>
-	  <tr title="<%= h('Do not start the resource automatically after creating.') %>">
-	    <td nowrap><div class="bold">Disabled:</div></td>
-	    <td>
-	      <input type=checkbox name="disabled">
-	    </td>
-	  </tr>
-      <% end %>
-      </table>
-    </div>
-    <table style="clear:left; float:left; margin-top: 25px;">
-      <% if @new_resource %>
-	<tr>
-	  <td class="reg">
-	    Resource ID
-	  </td>
-	  <td>
-	    <input style="margin-right: 50px;" type="text" name="name" size="35" class="text_field">
-	  </td>
-	</tr>
-      <% end %>
-
-      <% if @existing_resource %>
-	<input type="hidden" name="resource_id" value="<%=@cur_resource.id%>">
-      <% end %>
-
-      <input type="hidden" name="resource_type" value="<%=@resource.name%>">
-      <% @resource.required_options.each { |name, desc|  %>
-	<tr title="<%=h(desc[1])%>">
-	  <td class="reg">
-	    <%= name %>
-	  </td>
-	  <td>
-	    <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.instance_attr[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=h(@cur_resource.instance_attr[name].value) if @existing_resource && @cur_resource && @cur_resource.instance_attr[name] %>" size="35" class="text_field">
-	  </td>
-	</tr>
-      <% } %>
-      <tr>
-	<td id="<%= @new_resource ? "optional_args_new" : "optional_args_existing" %>" nowrap class="reg" onclick="show_hide_constraints(this)" colspan=2>
-	  <span class="rightarrow sprites"></span><span class="downarrow sprites" style="display: none;"></span>
-	  <div class="bold">Optional Arguments:</div>
-	</td>
-      </tr>
-      <% @resource.optional_options.each { |name, desc|  %>
-	<tr title="<%=h(desc[1])%>" style="display:none;">
-	  <td class="reg">
-	    <%= name %>
-	  </td>
-	  <td>
-	    <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.instance_attr[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=h(@cur_resource.instance_attr[name].value) if @existing_resource && @cur_resource && @cur_resource.instance_attr[name] %>" size="35" class="text_field">
-	  </td>
-	</tr>
-      <% } %>
-      <tr class="stop"><td class="center" style="padding-top:20px;height:26px;" colspan=2>
-	  <input class="apply_changes" type=submit class="text_field" onclick="$(this).hide();create_resource($(this).parents('form'),<%=@existing_resource ? 'true':'false'%>); return false;" value="<%= @existing_resource ? "Apply Changes" : "Create Resource" %>">
-	</td>
-      </tr>
-    </table>
-  </form>
-</div>
diff --git a/setup.py b/setup.py
index 5c08e07..58c159a 100644
--- a/setup.py
+++ b/setup.py
@@ -1,14 +1,34 @@
 #!/usr/bin/env python
 
-from distutils.core import setup
+import os
 
-setup(name='pcs',
-    version='0.9.149',
+from setuptools import setup, Command, find_packages
+
+class CleanCommand(Command):
+    user_options = []
+    def initialize_options(self):
+        self.cwd = None
+    def finalize_options(self):
+        self.cwd = os.getcwd()
+    def run(self):
+        assert os.getcwd() == self.cwd, 'Must be in package root: %s' % self.cwd
+        os.system('rm -rf ./build ./dist ./*.pyc ./*.egg-info')
+
+setup(
+    name='pcs',
+    version='0.9.151',
     description='Pacemaker Configuration System',
     author='Chris Feist',
     author_email='cfeist at redhat.com',
     url='http://github.com/feist/pcs',
-    packages=['pcs'],
+    packages=find_packages(exclude=["*.test", "*.test.*", "test.*", "test"]),
     package_data={'pcs':['bash_completion.d.pcs','pcs.8']},
-    py_modules=['pcs']
-    )
+    entry_points={
+        'console_scripts': [
+            'pcs = pcs.app:main',
+        ],
+    },
+    cmdclass={
+        'clean': CleanCommand,
+    }
+)

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git



More information about the Debian-HA-Commits mailing list