[Debian-ha-commits] [pcs] 01/01: Imported Upstream version 0.9.148

Richard Winters devrik-guest at moszumanska.debian.org
Tue Jan 26 17:19:25 UTC 2016


This is an automated email from the git hooks/post-receive script.

devrik-guest pushed a commit to branch upstream
in repository pcs.

commit ef4569052e2333a6ba5855d50ec3f1380ab16ab2
Author: Richard B Winters <rik at mmogp.com>
Date:   Tue Jan 26 11:07:14 2016 -0500

    Imported Upstream version 0.9.148
---
 .gitignore                                         |    7 +
 MANIFEST.in                                        |    6 +
 Makefile                                           |  105 +-
 README                                             |   50 +-
 README.md                                          |  105 +
 maketarballs.py                                    |   21 +-
 newversion.py                                      |   41 +-
 pcs/acl.py                                         |  131 +-
 pcs/cluster.py                                     | 1411 ++++++---
 pcs/config.py                                      |  378 ++-
 pcs/constraint.py                                  |  237 +-
 pcs/corosync.conf.fedora.template                  |   22 -
 pcs/corosync.conf.template                         |   39 -
 pcs/corosync_conf.py                               |  153 +
 pcs/node.py                                        |  112 +
 pcs/pcs.8                                          |  142 +-
 pcs/pcs.py                                         |  118 +-
 pcs/pcsd.py                                        |  173 ++
 pcs/prop.py                                        |   32 +-
 pcs/resource.py                                    | 1740 ++++++-----
 pcs/rule.py                                        |   34 +-
 pcs/settings.py                                    |    8 +-
 ...ttings.py => settings.py.i386-linux-gnu.debian} |   16 +-
 ...ings.py => settings.py.x86_64-linux-gnu.debian} |   16 +-
 pcs/status.py                                      |  154 +-
 pcs/stonith.py                                     |  126 +-
 pcs/test/.gitignore                                |    1 +
 pcs/test/Makefile                                  |   27 +-
 pcs/test/corosync.conf                             |   21 +-
 pcs/test/corosync.conf.orig                        |   21 +-
 pcs/test/pcs_test_functions.py                     |   21 +-
 pcs/test/test.py                                   |   12 +-
 pcs/test/test_acl.py                               |  313 +-
 pcs/test/test_cluster.py                           | 1677 ++++++++--
 pcs/test/test_constraints.py                       |  547 ++--
 pcs/test/test_corosync_conf.py                     | 1182 +++++++
 pcs/test/test_node.py                              |  221 ++
 pcs/test/test_properties.py                        |   11 +-
 pcs/test/test_resource.py                          | 2229 ++++++++++++--
 pcs/test/test_rule.py                              |  293 +-
 pcs/test/test_stonith.py                           |  377 ++-
 pcs/test/test_utils.py                             | 1161 ++++---
 pcs/test/transitions01.xml                         |  296 ++
 pcs/test/transitions02.xml                         |  116 +
 pcs/usage.py                                       |  390 ++-
 pcs/utils.py                                       | 1654 +++++-----
 pcsd/Gemfile                                       |    9 +-
 pcsd/Gemfile.lock                                  |   23 +-
 pcsd/Makefile                                      |   15 +-
 pcsd/auth.rb                                       |  149 +-
 pcsd/bootstrap.rb                                  |   77 +
 pcsd/cfgsync.rb                                    |  773 +++++
 pcsd/cluster.rb                                    |   24 +-
 pcsd/cluster_entity.rb                             | 1051 +++++++
 pcsd/config.rb                                     |  167 +-
 pcsd/corosyncconf.rb                               |  152 +
 pcsd/fenceagent.rb                                 |   54 +-
 pcsd/pcs.rb                                        | 1702 +++++++++--
 pcsd/pcsd                                          |   40 +-
 pcsd/pcsd-cli.rb                                   |  138 +
 pcsd/pcsd.conf                                     |    1 +
 pcsd/pcsd.debian                                   |  141 +
 pcsd/pcsd.pam.debian                               |    5 +
 pcsd/pcsd.rb                                       |  816 +++--
 pcsd/pcsd.service                                  |    8 +-
 pcsd/pcsd.service.debian                           |    9 +
 pcsd/permissions.rb                                |  180 ++
 pcsd/public/css/style.css                          |   39 +-
 pcsd/public/js/nodes-ember.js                      | 2316 ++++++++------
 pcsd/public/js/pcsd.js                             | 1886 +++++++++---
 pcsd/remote.rb                                     | 2049 +++++++++----
 pcsd/resource.rb                                   |  356 +--
 pcsd/settings.rb                                   |   24 +
 pcsd/settings.rb.i386-linux-gnu.debian             |   24 +
 pcsd/settings.rb.x86_64-linux-gnu.debian           |   24 +
 pcsd/ssl.rb                                        |   80 +-
 pcsd/systemd-notify-fix.py                         |   16 -
 pcsd/test/.gitignore                               |    1 +
 pcsd/test/Makefile                                 |    6 +
 pcsd/test/cib1.xml                                 |  401 +++
 pcsd/test/cluster.conf                             |   27 +
 pcsd/test/corosync.conf                            |   27 +
 pcsd/test/crm1.xml                                 |  112 +
 pcsd/test/pcs_settings.conf                        |   21 +
 pcsd/test/pcsd_test_utils.rb                       |   27 +
 pcsd/test/test_all_suite.rb                        |   11 +
 pcsd/test/test_auth.rb                             |   95 +
 pcsd/test/test_cfgsync.rb                          |  917 ++++++
 pcsd/test/test_cluster.rb                          |   51 +
 pcsd/test/test_cluster_entity.rb                   | 3226 ++++++++++++++++++++
 pcsd/test/test_config.rb                           |  711 +++++
 pcsd/test/test_corosyncconf.rb                     | 1208 ++++++++
 pcsd/test/test_pcs.rb                              |  257 ++
 pcsd/test/test_permissions.rb                      |  498 +++
 pcsd/test/tokens                                   |    9 +
 pcsd/views/_acls.erb                               |   36 +-
 pcsd/views/_cluster_list.erb                       |   83 +-
 pcsd/views/_configure.erb                          |   75 +-
 pcsd/views/_dialogs.erb                            |   38 +
 pcsd/views/_permissions_cluster.erb                |  120 +
 pcsd/views/_resource.erb                           |  327 +-
 pcsd/views/_resource_list.erb                      |   43 +-
 pcsd/views/configure.erb                           |   39 -
 pcsd/views/fenceagentform.erb                      |    8 +-
 pcsd/views/main.erb                                |  665 +++-
 pcsd/views/manage.erb                              |  181 +-
 pcsd/views/nodes.erb                               |  508 +--
 pcsd/views/permissions.erb                         |   54 +
 pcsd/views/resourceagentform.erb                   |   20 +-
 pcsd/views/resourcedeps.erb                        |   83 -
 pcsd/wizards/apache.rb                             |   10 +-
 setup.py                                           |    4 +-
 112 files changed, 30293 insertions(+), 7901 deletions(-)

diff --git a/.gitignore b/.gitignore
index 0d20b64..950d231 100644
--- a/.gitignore
+++ b/.gitignore
@@ -1 +1,8 @@
 *.pyc
+*.swp
+/MANIFEST
+/dist/
+/pcs/bash_completion.d.pcs
+/pcsd/pcs_settings.conf
+/pcsd/pcs_users.conf
+
diff --git a/MANIFEST.in b/MANIFEST.in
index 95a1487..6ac4a25 100644
--- a/MANIFEST.in
+++ b/MANIFEST.in
@@ -1,4 +1,10 @@
 include Makefile
 include COPYING
+include pcs/pcs.8
+include pcs/bash_completion.d.pcs
+include pcsd/.bundle/config
 graft pcsd
+graft pcsd/vendor/cache
 prune pcsd/vendor/bundle
+prune pcsd/test
+recursive-exclude pcsd .gitignore
diff --git a/Makefile b/Makefile
index c37373b..2d3ff51 100644
--- a/Makefile
+++ b/Makefile
@@ -1,3 +1,21 @@
+# Compatibility with GNU/Linux [i.e. Debian] based distros
+UNAME_OS_GNU := $(shell if uname -o | grep -q "GNU/Linux" ; then echo true; else echo false; fi)
+UNAME_KERNEL_DEBIAN := $(shell if uname -v | grep -q "Debian\|Ubuntu" ; then echo true; else echo false; fi)
+IS_DEBIAN=false
+UNAME_DEBIAN_VER_8=false
+
+ifeq ($(UNAME_OS_GNU),true)
+  ifeq ($(UNAME_KERNEL_DEBIAN),true)
+    IS_DEBIAN=true
+    UNAME_DEBIAN_VER_8 := $(shell if grep -q -i "8" /etc/debian_version ; then echo true; else echo false; fi)
+    settings_x86_64 := $(shell if uname -m | grep -q -i "x86_64" ; then echo true; else echo false; fi)
+    settings_i386=false
+    ifeq ($(settings_x86_64),false)
+      settings_i386 := $(shell if uname -m | grep -q -i "i386" ; then echo true; else echo false; fi)
+    endif
+  endif
+endif
+
 ifndef PYTHON_SITELIB
   PYTHON_SITELIB=$(shell python -c "from distutils.sysconfig import get_python_lib; print(get_python_lib())")
 endif
@@ -8,25 +26,61 @@ ifeq ($(PYTHON_SITELIB), /usr/lib/python2.7/dist-packages)
   EXTRA_SETUP_OPTS="--install-layout=deb"
 endif
 
+# Check for systemd presence, add compatibility with Debian based distros
 IS_SYSTEMCTL=false
-ifeq ("$(wildcard /usr/bin/systemctl)","/usr/bin/systemctl")
-  IS_SYSTEMCTL=true
+
+ifeq ($(IS_DEBIAN),true)
+  IS_SYSTEMCTL = $(shell if [ -d /var/run/systemd/system ] ; then echo true ; else echo false; fi)
+  ifeq ($(IS_SYSTEMCTL),false)
+    ifeq ($(SYSTEMCTL_OVERRIDE),true)
+      IS_SYSTEMCTL=true
+    endif
+  endif
 else
-  ifeq ("$(wildcard /bin/systemctl)","/usr/bin/systemctl")
+  ifeq ("$(wildcard /usr/bin/systemctl)","/usr/bin/systemctl")
     IS_SYSTEMCTL=true
+  else
+    ifeq ("$(wildcard /bin/systemctl)","/usr/bin/systemctl")
+      IS_SYSTEMCTL=true
+    endif
   endif
 endif
 
+# Check for an override for building gems
+ifndef BUILD_GEMS
+  BUILD_GEMS=true
+endif
+
 MANDIR=/usr/share/man
 
 ifndef PREFIX
   PREFIX=$(shell prefix=`python -c "import sys; print(sys.prefix)"` || prefix="/usr"; echo $$prefix)
 endif
 
+ifndef systemddir
+  systemddir=/usr/lib/systemd
+endif
+
 ifndef initdir
   initdir=/etc/init.d
 endif
 
+ifndef install_settings
+  install_settings=false
+else
+  ifeq ($(install_settings),true)
+    ifeq ($(settings_x86_64),true)
+      settings_file=settings.py.x86_64-linux-gnu.debian
+      settings_file_pcsd=settings.rb.x86_64-linux-gnu.debian
+    else
+      ifeq ($(settings_i386),true)
+        settings_file=settings.py.i386-linux-gnu.debian
+        settings_file_pcsd=settings.rb.i386-linux-gnu.debian
+      endif
+    endif
+  endif
+endif
+
 install: bash_completion
 	python setup.py install --prefix ${DESTDIR}${PREFIX} ${EXTRA_SETUP_OPTS}
 	mkdir -p ${DESTDIR}${PREFIX}/sbin/
@@ -34,30 +88,61 @@ install: bash_completion
 	ln -fs ${PYTHON_SITELIB}/pcs/pcs.py ${DESTDIR}${PREFIX}/sbin/pcs
 	install -D pcs/bash_completion.d.pcs ${DESTDIR}/etc/bash_completion.d/pcs
 	install -m644 -D pcs/pcs.8 ${DESTDIR}/${MANDIR}/man8/pcs.8
+ifeq ($(IS_DEBIAN),true)
+  ifeq ($(install_settings),true)
+	rm -f  ${DESTDIR}${PYTHON_SITELIB}/pcs/settings.py
+	install -m755 pcs/${settings_file} ${DESTDIR}${PYTHON_SITELIB}/pcs/settings.py
+  endif
+endif
+	
 
 install_pcsd:
+ifeq ($(BUILD_GEMS),true)
 	make -C pcsd build_gems
+endif
 	mkdir -p ${DESTDIR}/var/log/pcsd
+ifeq ($(IS_DEBIAN),true)
+	mkdir -p ${DESTDIR}/usr/share/
+	cp -r pcsd ${DESTDIR}/usr/share/
+	install -m 644 -D pcsd/pcsd.conf ${DESTDIR}/etc/default/pcsd
+	install -d ${DESTDIR}/etc/pam.d
+	install  pcsd/pcsd.pam.debian ${DESTDIR}/etc/pam.d/pcsd
+  ifeq ($(install_settings),true)
+	rm -f  ${DESTDIR}/usr/share/pcsd/settings.rb
+	install -m755 pcsd/${settings_file_pcsd} ${DESTDIR}/usr/share/pcsd/settings.rb
+  endif
+  ifeq ($(IS_SYSTEMCTL),true)
+	install -d ${DESTDIR}/${systemddir}/system/
+	install -m 644 pcsd/pcsd.service.debian ${DESTDIR}/${systemddir}/system/pcsd.service
+  else
+	install -m 755 -D pcsd/pcsd.debian ${DESTDIR}/${initdir}/pcsd
+  endif
+else
 	mkdir -p ${DESTDIR}${PREFIX}/lib/
 	cp -r pcsd ${DESTDIR}${PREFIX}/lib/
 	install -m 644 -D pcsd/pcsd.conf ${DESTDIR}/etc/sysconfig/pcsd
 	install -d ${DESTDIR}/etc/pam.d
 	install  pcsd/pcsd.pam ${DESTDIR}/etc/pam.d/pcsd
-	install -m 700 -d ${DESTDIR}/var/lib/pcsd
-	install -m 644 -D pcsd/pcsd.logrotate ${DESTDIR}/etc/logrotate.d/pcsd
-ifeq ($(IS_SYSTEMCTL),true)
-	install -d ${DESTDIR}/usr/lib/systemd/system/
-	install -m 644 pcsd/pcsd.service ${DESTDIR}/usr/lib/systemd/system/
-else
+  ifeq ($(IS_SYSTEMCTL),true)
+	install -d ${DESTDIR}/${systemddir}/system/
+	install -m 644 pcsd/pcsd.service ${DESTDIR}/${systemddir}/system/
+  else
 	install -m 755 -D pcsd/pcsd ${DESTDIR}/${initdir}/pcsd
+  endif
 endif
+	install -m 700 -d ${DESTDIR}/var/lib/pcsd
+	install -m 644 -D pcsd/pcsd.logrotate ${DESTDIR}/etc/logrotate.d/pcsd
 
 uninstall:
 	rm -f ${DESTDIR}${PREFIX}/sbin/pcs
 	rm -rf ${DESTDIR}${PYTHON_SITELIB}/pcs
+ifeq ($(IS_DEBIAN),true)
+	rm -rf ${DESTDIR}/usr/share/pcsd
+else
 	rm -rf ${DESTDIR}${PREFIX}/lib/pcsd
+endif
 ifeq ($(IS_SYSTEMCTL),true)
-	rm -f ${DESTDIR}/usr/lib/systemd/system/pcsd.service
+	rm -f ${DESTDIR}/${systemddir}/system/pcsd.service
 else
 	rm -f ${DESTDIR}/${initdir}/pcsd
 endif
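
The Makefile hunks above replace hard-coded Fedora paths with Debian/architecture detection and a set of overridable make variables (install_settings, BUILD_GEMS, SYSTEMCTL_OVERRIDE, systemddir). Purely as an illustration of how those knobs can be combined on a Debian host (the exact invocation used by the Debian packaging is not part of this diff):

# make install install_settings=true
# make install_pcsd install_settings=true BUILD_GEMS=false SYSTEMCTL_OVERRIDE=true
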
diff --git a/README b/README
index 6ea79a2..a42b03a 100644
--- a/README
+++ b/README
@@ -1,20 +1,31 @@
 PCS - Pacemaker/Corosync configuration system
 
-Quick install
+Quick start
 
-# tar -xzvf pcs-0.9.138.tar.gz
-# cd pcs-0.9.138
+To install pcs, run the following in a terminal:
+
+# tar -xzvf pcs-0.9.143.tar.gz
+# cd pcs-0.9.143
 # make install
 
+If you are using Debian or a Debian-based distribution (such as Ubuntu), run
+the following instead:
+
+# tar -xzvf pcs-0.9.143.tar.gz
+# cd pcs-0.9.143
+# make install install_settings=true
+
 This will install pcs into /usr/sbin/pcs
 
-To create a cluster run the following commands on all nodes (replacing node1, node2, node3 with a list of nodes in the cluster.
+To create a cluster run the following commands on all nodes (replacing node1,
+node2, node3 with a list of nodes in the cluster).
 # pcs cluster setup --local --name cluster_name node1 node2 node3
 
 Then run the following command on all nodes:
 # pcs cluster start
 
-After a few moments the cluster should startup and you can get the status of the cluster
+After a few moments the cluster should start up and you can get the status of
+the cluster
 # pcs status
 
 After this you can add resources and stonith agents:
@@ -22,15 +33,30 @@ After this you can add resources and stonith agents:
 and
 # pcs stonith help
 
+You can also install pcsd which operates as a GUI and remote server for pcs.
+pcsd may also be necessary in order to follow the guides on the clusterlabs.org
+website.  To install pcsd run the following commands from the root of your pcs
+directory.  (You must have the ruby bundler gem installed, rubygem-bundler in
+Fedora, and development packages installed)
+
+# cd pcsd ; make get_gems ; cd ..
+# make install_pcsd
+
+If you are using Debian or a Debian-based distribution (such as Ubuntu), run
+the following instead:
+
+# cd pcsd ; make get_gems ; cd ..
+# make install_pcsd install_settings=true
+
+If you are using GNU/Linux, it's now time to:
+# systemctl daemon-reload
+
 Currently this is built into Fedora (other distributions to follow).  You can
 see the current Fedora .spec in the fedora package git repositories here:
 http://pkgs.fedoraproject.org/cgit/pcs.git/
 
-Current Fedora 18 .spec:
-http://pkgs.fedoraproject.org/cgit/pcs.git/tree/pcs.spec?h=f18
-
-You can also install pcsd which operates as a GUI and remote server for pcs.  To install pcsd run the following commands from the root of your pcs directory. (You must have the ruby bundler gem installed, rubygem-bundler in Fedora, and development packages installed)
-# cd pcsd ; make get_gems ; cd ..
-# make install_pcsd
+Current Fedora 23 .spec:
+http://pkgs.fedoraproject.org/cgit/pcs.git/tree/pcs.spec?h=f23
 
-If you have an questions or concerns please feel free to email cfeist at redhat.com or open a github issue on the pcs project.
+If you have any questions or concerns please feel free to email
+cfeist at redhat.com or open a github issue on the pcs project.
diff --git a/README.md b/README.md
new file mode 100644
index 0000000..bc8f6f0
--- /dev/null
+++ b/README.md
@@ -0,0 +1,105 @@
+## PCS - Pacemaker/Corosync configuration system
+
+
+### Quick Start
+***
+
+
+- **PCS Installation from Source**
+
+   Run the following in a terminal:
+
+   ```shell
+   # tar -xzvf pcs-0.9.143.tar.gz
+   # cd pcs-0.9.143
+   # make install
+   ```
+
+   If you are using Debian or a Debian-based distribution (such as Ubuntu), run the following instead:
+
+   ```shell
+   # tar -xzvf pcs-0.9.143.tar.gz
+   # cd pcs-0.9.143
+   # make install install_settings=true
+   ```
+
+   This will install pcs into `/usr/sbin/pcs`.
+
+<br />
+- **Create and Start a Basic Cluster**
+
+   To create a cluster run the following commands on all nodes (replacing node1, node2, node3 with a list of nodes in the cluster).
+
+   ```shell
+   # pcs cluster setup --local --name cluster_name node1 node2 node3
+   ```
+
+   Then run the following command on all nodes:
+
+   ```shell
+   # pcs cluster start
+   ```
+
+<br />
+- **Check the Cluster Status**
+
+   After a few moments the cluster should start up and you can get the status of the cluster
+
+   ```shell
+   # pcs status
+   ```
+
+<br />
+- **Add Cluster Resources**
+
+   After this you can add resources and stonith agents:
+
+   ```shell
+   # pcs resource help
+   ```
+
+   and
+
+   ```shell
+   # pcs stonith help
+   ```
+
+<br />
+- **PCSD Installation from Source**
+
+   You can also install pcsd which operates as a GUI and remote server for pcs. pcsd may also be necessary in order to follow the guides on the clusterlabs.org website.
+
+   To install pcsd run the following commands from the root of your pcs directory. (You must have the ruby bundler gem installed, rubygem-bundler in Fedora, and development packages installed)
+
+   ```shell
+   # cd pcsd ; make get_gems ; cd ..
+   # make install_pcsd
+   ```
+
+   If you are using Debian or a Debian-based distribution (such as Ubuntu), run the following instead:
+
+   ```shell
+   # cd pcsd ; make get_gems ; cd ..
+   # make install_pcsd install_settings=true
+   ```
+
+   If you are using GNU/Linux, it's now time to:
+
+   ```shell
+   # systemctl daemon-reload
+   ```
+
+<br />
+### Packages
+***
+
+   Currently this is built into Fedora (other distributions to follow).  You can see the current Fedora .spec in the fedora package git repositories here: http://pkgs.fedoraproject.org/cgit/pcs.git/
+
+   Current Fedora 23 .spec:
+   http://pkgs.fedoraproject.org/cgit/pcs.git/tree/pcs.spec?h=f23
+
+<br />
+### Inquiries
+***
+
+If you have any questions or concerns please feel free to email cfeist at redhat.com or open a github issue on the pcs project.
diff --git a/maketarballs.py b/maketarballs.py
index 2839ef9..d3048fe 100644
--- a/maketarballs.py
+++ b/maketarballs.py
@@ -1,13 +1,22 @@
 #!/usr/bin/python
 
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import sys
 import os
-sys.path.append("pcs")
+
+sys.path.insert(
+    0,
+    os.path.join(os.path.dirname(os.path.abspath(__file__)), "pcs")
+)
 import settings
 
-pcs_version = settings.pcs_version
 
-print os.system("cp dist/pcs-"+pcs_version+".tar dist/pcs-withgems-"+pcs_version+".tar")
-print os.system("tar --delete -f dist/pcs-"+pcs_version+".tar '*/pcsd/vendor'")
-print os.system("gzip dist/pcs-"+pcs_version+".tar")
-print os.system("gzip dist/pcs-withgems-"+pcs_version+".tar")
+pcs_version = settings.pcs_version
+print(os.system("cp dist/pcs-"+pcs_version+".tar dist/pcs-withgems-"+pcs_version+".tar"))
+print(os.system("tar --delete -f dist/pcs-"+pcs_version+".tar '*/pcsd/vendor'"))
+print(os.system("gzip dist/pcs-"+pcs_version+".tar"))
+print(os.system("gzip dist/pcs-withgems-"+pcs_version+".tar"))
diff --git a/newversion.py b/newversion.py
index eb49fca..8007f52 100644
--- a/newversion.py
+++ b/newversion.py
@@ -1,26 +1,47 @@
 #!/usr/bin/python
 
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import sys
 import os
-sys.path.append("pcs")
+import locale
+import datetime
+
+sys.path.insert(
+    0,
+    os.path.join(os.path.dirname(os.path.abspath(__file__)), "pcs")
+)
 import settings
 
+
+locale.setlocale(locale.LC_ALL, ("en_US", "UTF-8"))
+
 # Get the current version, increment by 1, verify changes, git commit & tag
 pcs_version_split = settings.pcs_version.split('.')
 pcs_version_split[2] = str(int(pcs_version_split[2]) + 1)
 new_version = ".".join(pcs_version_split)
 
-print os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' setup.py")
-print os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings.py")
-print os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/pcs.8")
-print os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcsd/pcsd.rb")
+print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' setup.py"))
+print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings.py"))
+print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings.py.i386-linux-gnu.debian"))
+print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcs/settings.py.x86_64-linux-gnu.debian"))
+print(os.system("sed -i 's/"+settings.pcs_version+"/"+new_version + "/' pcsd/bootstrap.rb"))
+
+manpage_head = '.TH PCS "8" "{date}" "pcs {version}" "System Administration Utilities"'.format(
+    date=datetime.date.today().strftime('%B %Y'),
+    version=new_version
+)
+print(os.system("sed -i '1c " + manpage_head + "' pcs/pcs.8"))
 
-print os.system("git diff")
-print "Look good? (y/n)"
+print(os.system("git diff"))
+print("Look good? (y/n)")
 choice = sys.stdin.read(1)
 if choice != "y":
-  print "Ok, exiting"
+  print("Ok, exiting")
   sys.exit(0)
 
-print os.system("git commit -a -m 'Bumped to "+new_version+"'")
-print os.system("git tag "+new_version)
+print(os.system("git commit -a -m 'Bumped to "+new_version+"'"))
+print(os.system("git tag "+new_version))
diff --git a/pcs/acl.py b/pcs/acl.py
index 4c2d696..bebd7d2 100644
--- a/pcs/acl.py
+++ b/pcs/acl.py
@@ -1,8 +1,15 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import sys
+
 import usage
 import utils
 import prop
 
+
 def acl_cmd(argv):
     if len(argv) == 0:
         argv = ["show"]
@@ -41,10 +48,10 @@ def acl_show(argv):
     properties = prop.get_set_properties(defaults=prop.get_default_properties())
     acl_enabled = properties.get("enable-acl", "").lower()
     if utils.is_cib_true(acl_enabled):
-        print "ACLs are enabled"
+        print("ACLs are enabled")
     else:
-        print "ACLs are disabled, run 'pcs acl enable' to enable"
-    print
+        print("ACLs are disabled, run 'pcs acl enable' to enable")
+    print()
 
     print_targets(dom)
     print_groups(dom)
@@ -57,11 +64,11 @@ def acl_disable(argv):
     prop.set_property(["enable-acl=false"])
 
 def acl_grant(argv):
-    print "Not yet implemented"
+    print("Not yet implemented")
 
 def acl_role(argv):
     if len(argv) < 2:
-        usage.acl("role")
+        usage.acl(["role"])
         sys.exit(1)
 
     dom = utils.get_cib_dom()
@@ -87,31 +94,16 @@ def acl_role(argv):
         if description != "":
             element.setAttribute("description", description)
         acls.appendChild(element)
-        
-        while (len(argv) > 2):
-            rwd = argv.pop(0)
-            if not rwd in ["read","write","deny"]:
-                usage.acl("role create")
-                sys.exit(1)
-            se = dom.createElement("acl_permission")
-            se.setAttribute("id", utils.find_unique_id(dom,role_name + "-" + rwd))
-            se.setAttribute("kind", rwd)
-            xp_id = argv.pop(0)
-            if xp_id == "xpath":
-                xpath_query = argv.pop(0)
-                se.setAttribute("xpath",xpath_query)
-            elif xp_id == "id":
-                acl_ref = argv.pop(0)
-                se.setAttribute("reference",acl_ref)
-            else:
-                usage.acl("role create")
-
-            element.appendChild(se)
 
+        if not add_permissions_to_role(element, argv):
+            usage.acl(["role create"])
+            sys.exit(1)
         utils.replace_cib_configuration(dom)
+
     elif command == "delete":
         if len(argv) < 1:
-            usage.acl("acl role delete")
+            usage.acl(["role delete"])
+            sys.exit(1)
 
         role_id = argv.pop(0)
         found = False
@@ -135,7 +127,7 @@ def acl_role(argv):
         utils.replace_cib_configuration(dom)
     elif command == "assign":
         if len(argv) < 2:
-            usage.acl("role assign")
+            usage.acl(["role assign"])
             sys.exit(1)
 
         if len(argv) == 2:
@@ -145,7 +137,7 @@ def acl_role(argv):
             role_id = argv[0]
             ug_id = argv[2]
         else:
-            usage.acl("role assign")
+            usage.acl(["role assign"])
             sys.exit(1)
 
         found = False
@@ -176,7 +168,7 @@ def acl_role(argv):
         utils.replace_cib_configuration(dom)
     elif command == "unassign":
         if len(argv) < 2:
-            usage.acl("role unassign")
+            usage.acl(["role unassign"])
             sys.exit(1)
 
         role_id = argv.pop(0)
@@ -216,10 +208,10 @@ def acl_role(argv):
 def acl_target(argv,group=False):
     if len(argv) < 2:
         if group:
-            usage.acl("group")
+            usage.acl(["group"])
             sys.exit(1)
         else:
-            usage.acl("target")
+            usage.acl(["user"])
             sys.exit(1)
 
     dom = utils.get_cib_dom()
@@ -245,6 +237,8 @@ def acl_target(argv,group=False):
 
         acls.appendChild(element)
         for role in argv:
+            if not utils.dom_get_element_with_id(acls, "acl_role", role):
+                utils.err("cannot find acl role: %s" % role)
             r = dom.createElement("role")
             r.setAttribute("id", role)
             element.appendChild(r)
@@ -270,14 +264,14 @@ def acl_target(argv,group=False):
         utils.replace_cib_configuration(dom)
     else:
         if group:
-            usage.acl("group")
+            usage.acl(["group"])
         else:
-            usage.acl("target")
+            usage.acl(["user"])
         sys.exit(1)
 
 def acl_permission(argv):
     if len(argv) < 1:
-        usage.acl("permission")
+        usage.acl(["permission"])
         sys.exit(1)
 
     dom = utils.get_cib_dom()
@@ -286,7 +280,7 @@ def acl_permission(argv):
     command = argv.pop(0)
     if command == "add":
         if len(argv) < 4:
-            usage.acl("permission add")
+            usage.acl(["permission add"])
             sys.exit(1)
         role_id = argv.pop(0)
         found = False
@@ -298,27 +292,17 @@ def acl_permission(argv):
             acl_role(["create", role_id] + argv) 
             return
 
-        while len(argv) >= 3:
-            kind = argv.pop(0)
-            se = dom.createElement("acl_permission")
-            se.setAttribute("id", utils.find_unique_id(dom, role_id + "-" + kind))
-            se.setAttribute("kind", kind)
-            xp_id = argv.pop(0).lower()
-            if xp_id == "xpath":
-                xpath_query = argv.pop(0)
-                se.setAttribute("xpath",xpath_query)
-            elif xp_id == "id":
-                acl_ref = argv.pop(0)
-                se.setAttribute("reference",acl_ref)
-            else:
-                usage.acl("permission add")
-            role.appendChild(se)
-
+        if not argv:
+            usage.acl(["permission add"])
+            sys.exit(1)
+        if not add_permissions_to_role(role, argv):
+            usage.acl(["permission add"])
+            sys.exit(1)
         utils.replace_cib_configuration(dom)
 
     elif command == "delete":
         if len(argv) < 1:
-            usage.acl("permission delete")
+            usage.acl(["permission delete"])
             sys.exit(1)
 
         perm_id = argv.pop(0)
@@ -333,32 +317,30 @@ def acl_permission(argv):
         utils.replace_cib_configuration(dom)
 
     else:
-        usage.acl("permission")
+        usage.acl(["permission"])
         sys.exit(1)
 
 def print_groups(dom):
     for elem in dom.getElementsByTagName("acl_group"):
-        print "Group: " + elem.getAttribute("id")
-        print "  Roles:",
+        print("Group: " + elem.getAttribute("id"))
         role_list = []
         for role in elem.getElementsByTagName("role"):
             role_list.append(role.getAttribute("id"))
-        print " ".join(role_list)
+        print(" ".join(["  Roles:"] + role_list))
 
 def print_targets(dom):
     for elem in dom.getElementsByTagName("acl_target"):
-        print "User: " + elem.getAttribute("id")
-        print "  Roles:",
+        print("User: " + elem.getAttribute("id"))
         role_list = []
         for role in elem.getElementsByTagName("role"):
             role_list.append(role.getAttribute("id"))
-        print " ".join(role_list)
+        print(" ".join(["  Roles:"] + role_list))
 
 def print_roles(dom):
     for elem in dom.getElementsByTagName("acl_role"):
-        print "Role: " + elem.getAttribute("id")
+        print("Role: " + elem.getAttribute("id"))
         if elem.getAttribute("description"):
-            print "  Description: " + elem.getAttribute("description")
+            print("  Description: " + elem.getAttribute("description"))
         for perm in elem.getElementsByTagName("acl_permission"):
             perm_name = "  Permission: " + perm.getAttribute("kind")
             if "xpath" in perm.attributes.keys():
@@ -366,7 +348,7 @@ def print_roles(dom):
             elif "reference" in perm.attributes.keys():
                 perm_name += " id " + perm.getAttribute("reference")
             perm_name += " (" + perm.getAttribute("id") + ")"
-            print perm_name
+            print(perm_name)
 
 def get_acls(dom):        
     acls = dom.getElementsByTagName("acls")
@@ -379,3 +361,28 @@ def get_acls(dom):
     else:
         acls = acls[0]
     return (dom,acls)
+
+def add_permissions_to_role(role_element, argv):
+    dom = role_element.ownerDocument
+    role_id = role_element.getAttribute("id")
+    while argv:
+        if len(argv) < 3:
+            return False
+        rwd = argv.pop(0).lower()
+        if not rwd in ["read", "write", "deny"]:
+            return False
+        se = dom.createElement("acl_permission")
+        se.setAttribute("id", utils.find_unique_id(dom, role_id + "-" + rwd))
+        se.setAttribute("kind", rwd)
+        xp_id = argv.pop(0).lower()
+        if xp_id == "xpath":
+            xpath_query = argv.pop(0)
+            se.setAttribute("xpath", xpath_query)
+        elif xp_id == "id":
+            acl_ref = argv.pop(0)
+            se.setAttribute("reference", acl_ref)
+        else:
+            return False
+        role_element.appendChild(se)
+    return True
+
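
The new add_permissions_to_role() helper above consumes its arguments in triples: a kind (read, write or deny) followed by either xpath <query> or id <element id>, and it is shared by 'acl role create' and 'acl permission add'. Illustrative CLI usage driving that parser (the role, user and id names below are made up):

# pcs acl role create operator read xpath //nodes write id dummy-resource
# pcs acl user create alice operator
# pcs acl enable
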
diff --git a/pcs/cluster.py b/pcs/cluster.py
index 35901d2..e7c408f 100644
--- a/pcs/cluster.py
+++ b/pcs/cluster.py
@@ -1,29 +1,38 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import os
 import subprocess
 import re
-import usage
-import urllib2
-import utils
 import sys
-import getpass
-import status
-import prop
-import resource
-import stonith
-import constraint
-import settings
 import socket
 import tempfile
 import datetime
-import commands
 import json
 import xml.dom.minidom
 import threading
+try:
+    # python2
+    from commands import getstatusoutput
+except ImportError:
+    # python3
+    from subprocess import getstatusoutput
+
+import settings
+import usage
+import utils
+import corosync_conf as corosync_conf_utils
+import pcsd
+import status
+import prop
+import resource
+import stonith
+import constraint
+
 
 pcs_dir = os.path.dirname(os.path.realpath(__file__))
-COROSYNC_CONFIG_TEMPLATE = pcs_dir + "/corosync.conf.template"
-COROSYNC_CONFIG_FEDORA_TEMPLATE = pcs_dir + "/corosync.conf.fedora.template"
-COROSYNC_CONFIG_FILE = settings.corosync_conf_file
 
 def cluster_cmd(argv):
     if len(argv) == 0:
@@ -35,16 +44,15 @@ def cluster_cmd(argv):
         usage.cluster(argv)
     elif (sub_cmd == "setup"):
         if "--name" in utils.pcs_options:
-            corosync_setup([utils.pcs_options["--name"]] + argv)
+            cluster_setup([utils.pcs_options["--name"]] + argv)
         else:
-            utils.err("A cluster name (--name <name>) is required to setup a cluster")
+            utils.err(
+                "A cluster name (--name <name>) is required to setup a cluster"
+            )
     elif (sub_cmd == "sync"):
         sync_nodes(utils.getNodesFromCorosyncConf(),utils.getCorosyncConf())
     elif (sub_cmd == "status"):
         status.cluster_status(argv)
-        print ""
-        print "PCSD Status:"
-        cluster_gui_status([],True)
     elif (sub_cmd == "pcsd-status"):
         cluster_gui_status(argv)
     elif (sub_cmd == "certkey"):
@@ -117,22 +125,6 @@ def cluster_cmd(argv):
         usage.cluster()
         sys.exit(1)
 
-# Create config and then send it to all of the nodes and start
-# corosync & pacemaker on the nodes
-# partial_argv is an array of args passed to corosync configure sync_start
-def sync_start(partial_argv, nodes):
-    argv = partial_argv[:]
-    config = corosync_setup(argv,True)
-    for node in nodes:
-        utils.setCorosyncConfig(node,config)
-    print "Starting cluster on nodes: " + ", ".join(nodes) + "..."
-    start_cluster_nodes(nodes)
-
-def sync(partial_argv,nodes):
-    argv = partial_argv[:]
-    config = corosync_setup(argv,True)
-    sync_nodes(nodes,config)
-
 def sync_nodes(nodes,config):
     for node in nodes:
         utils.setCorosyncConfig(node,config)
@@ -152,12 +144,12 @@ def cluster_token(argv):
     node = argv[0]
     tokens = utils.readTokens()
     if node in tokens:
-        print tokens[node]
+        print(tokens[node])
     else:
         utils.err("No authorization token for: %s" % (node))
 
 def cluster_token_nodes(argv):
-    print "\n".join(sorted(utils.readTokens().keys()))
+    print("\n".join(sorted(utils.readTokens().keys())))
 
 def auth_nodes(nodes):
     if "-u" in utils.pcs_options:
@@ -171,44 +163,94 @@ def auth_nodes(nodes):
         password = None
 
     set_nodes = set(nodes)
-    failed_count = 0
-    for node in nodes:
-        status = utils.checkAuthorization(node)
-        need_auth = status[0] == 3 or "--force" in utils.pcs_options
-        mutually_authorized = False
-        if status[0] == 0:
-            try:
-                auth_status = json.loads(status[1])
-                if auth_status["success"]:
-                    if set_nodes == set(auth_status["node_list"]):
-                        mutually_authorized = True
-            except ValueError, KeyError:
-                pass
-        if need_auth or not mutually_authorized:
-            if username == None:
-                sys.stdout.write('Username: ')
-                sys.stdout.flush()
-                username = raw_input("")
-            if password == None:
-                if sys.stdout.isatty():
-                    password = getpass.getpass("Password: ")
-                else:
-                    sys.stdout.write('Password: ')
-                    sys.stdout.flush()
-                    password = raw_input("")
-            if not utils.updateToken(node,nodes,username,password):
-                failed_count += 1
-                continue
-            print "%s: Authorized" % (node)
-        elif mutually_authorized:
-            print node + ": Already authorized"
-        else:
-            utils.err("Unable to communicate with %s" % (node), False)
-            failed_count += 1
+    need_auth = "--force" in utils.pcs_options or (username or password)
+    if not need_auth:
+        for node in set_nodes:
+            status = utils.checkAuthorization(node)
+            if status[0] == 3:
+                need_auth = True
+                break
+            mutually_authorized = False
+            if status[0] == 0:
+                try:
+                    auth_status = json.loads(status[1])
+                    if auth_status["success"]:
+                        if set_nodes.issubset(set(auth_status["node_list"])):
+                            mutually_authorized = True
+                except (ValueError, KeyError):
+                    pass
+            if not mutually_authorized:
+                need_auth = True
+                break
 
-    if failed_count > 0:
-        sys.exit(failed_count)
+    if need_auth:
+        if username == None:
+            username = utils.get_terminal_input('Username: ')
+        if password == None:
+            password = utils.get_terminal_password()
 
+        auth_nodes_do(
+            set_nodes, username, password, '--force' in utils.pcs_options,
+            '--local' in utils.pcs_options
+        )
+    else:
+        for node in set_nodes:
+            print(node + ": Already authorized")
+
+def auth_nodes_do(nodes, username, password, force, local):
+    pcsd_data = {
+        'nodes': list(set(nodes)),
+        'username': username,
+        'password': password,
+        'force': force,
+        'local': local,
+    }
+    output, retval = utils.run_pcsdcli('auth', pcsd_data)
+    if retval == 0 and output['status'] == 'access_denied':
+        utils.err('Access denied')
+    if retval == 0 and output['status'] == 'ok' and output['data']:
+        failed = False
+        try:
+            if not output['data']['sync_successful']:
+                utils.err(
+                    "Some nodes had newer tokens than the local node. "
+                    + "Local node's tokens were updated. "
+                    + "Please repeat the authentication if needed."
+                )
+            for node, result in output['data']['auth_responses'].items():
+                if result['status'] == 'ok':
+                    print("{0}: Authorized".format(node))
+                elif result['status'] == 'already_authorized':
+                    print("{0}: Already authorized".format(node))
+                elif result['status'] == 'bad_password':
+                    utils.err(
+                        "{0}: Username and/or password is incorrect".format(node),
+                        False
+                    )
+                    failed = True
+                elif result['status'] == 'noresponse':
+                    utils.err("Unable to communicate with {0}".format(node), False)
+                    failed = True
+                else:
+                    utils.err("Unexpected response from {0}".format(node), False)
+                    failed = True
+            if output['data']['sync_nodes_err']:
+                utils.err(
+                    (
+                        "Unable to synchronize and save tokens on nodes: {0}. "
+                        + "Are they authorized?"
+                    ).format(
+                        ", ".join(output['data']['sync_nodes_err'])
+                    ),
+                    False
+                )
+                failed = True
+        except:
+            utils.err('Unable to communicate with pcsd')
+        if failed:
+            sys.exit(1)
+        return
+    utils.err('Unable to communicate with pcsd')
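
Authentication is now delegated to pcsd through run_pcsdcli('auth', ...), which returns a per-node status that is reported back to the user. A typical command exercising this path (the -u option seen above supplies the username non-interactively; hacluster is the conventional pcsd account and is shown here only as an example):

# pcs cluster auth node1 node2 node3 -u hacluster
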
 
 # If no arguments get current cluster node status, otherwise get listed
 # nodes status
@@ -225,414 +267,704 @@ def cluster_gui_status(argv,dont_exit = False):
     else:
         bad_nodes = check_nodes(argv, "  ")
     if bad_nodes and not dont_exit:
-        sys.exit(1)
+        sys.exit(2)
 
 def cluster_certkey(argv):
-    if len(argv) != 2:
-        usage.cluster(["certkey"])
-        exit(1)
-
-    certfile = argv[0]
-    keyfile = argv[1]
-
-    try:
-        with open(certfile, 'r') as myfile:
-            cert = myfile.read()
-    except IOError as e:
-        utils.err(e)
-
-    try:
-        with open(keyfile, 'r') as myfile:
-            key = myfile.read()
-    except IOError as e:
-        utils.err(e)
-
-    if not "--force" in utils.pcs_options and (os.path.exists(settings.pcsd_cert_location) or os.path.exists(settings.pcsd_key_location)):
-        utils.err("certificate and/or key already exists, your must use --force to overwrite")
-
-    try:
-        try:
-            os.chmod(settings.pcsd_cert_location, 0700)
-        except OSError: # If the file doesn't exist, we don't care
-            pass
-
-        try:
-            os.chmod(settings.pcsd_key_location, 0700)
-        except OSError: # If the file doesn't exist, we don't care
-            pass
-
-        with os.fdopen(os.open(settings.pcsd_cert_location, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0700), 'wb') as myfile:
-            myfile.write(cert)
-
-        with os.fdopen(os.open(settings.pcsd_key_location, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0700), 'wb') as myfile:
-            myfile.write(key)
-
-    except IOError as e:
-        utils.err(e)
-
-    print "Certificate and key updated, you may need to restart pcsd (service pcsd restart) for new settings to take effect"
+    return pcsd.pcsd_certkey(argv)
 
 # Check and see if pcsd is running on the nodes listed
 def check_nodes(nodes, prefix = ""):
     bad_nodes = False
-    pm_nodes = utils.getPacemakerNodesID(True)
-    cs_nodes = utils.getCorosyncNodesID(True)
+    if not utils.is_rhel6():
+        pm_nodes = utils.getPacemakerNodesID(True)
+        cs_nodes = utils.getCorosyncNodesID(True)
     for node in nodes:
         status = utils.checkAuthorization(node)
 
-        if node not in pm_nodes.values():
-            for n_id, n in cs_nodes.items():
-                if node == n and n_id in pm_nodes:
-                    real_node_name = pm_nodes[n_id]
-                    if real_node_name == "(null)":
-                        real_node_name = "*Unknown*"
-                    node = real_node_name +  " (" + node + ")"
-                    break
+        if not utils.is_rhel6():
+            if node not in pm_nodes.values():
+                for n_id, n in cs_nodes.items():
+                    if node == n and n_id in pm_nodes:
+                        real_node_name = pm_nodes[n_id]
+                        if real_node_name == "(null)":
+                            real_node_name = "*Unknown*"
+                        node = real_node_name +  " (" + node + ")"
+                        break
 
         if status[0] == 0:
-            print prefix + node + ": Online"
+            print(prefix + node + ": Online")
         elif status[0] == 3:
-            print prefix + node + ": Unable to authenticate"
+            print(prefix + node + ": Unable to authenticate")
             bad_nodes = True
         else:
-            print prefix + node + ": Offline"
+            print(prefix + node + ": Offline")
             bad_nodes = True
     return bad_nodes
-    
-def corosync_setup(argv,returnConfig=False):
-    fedora_config = not utils.is_rhel6()
-    failure = False
-    primary_nodes = []
-
-    # If node contains a ',' we only care about the first address
-    for node in argv[1:]:
-        if "," in node:
-            primary_nodes.append(node.split(',')[0])
-        else:
-            primary_nodes.append(node)
 
+def cluster_setup(argv):
     if len(argv) < 2:
-        usage.cluster()
-        exit(1)
+        usage.cluster(["setup"])
+        sys.exit(1)
 
-    if not returnConfig and "--start" in utils.pcs_options and not "--local" in utils.pcs_options:# and fedora_config:
-        sync_start(argv, primary_nodes)
-        if "--enable" in utils.pcs_options:
-            enable_cluster(primary_nodes)
-        return
-    elif not returnConfig and not "--local" in utils.pcs_options:# and fedora_config:
-        sync(argv, primary_nodes)
-        if "--enable" in utils.pcs_options:
-            enable_cluster(primary_nodes)
-        return
-    else:
-        nodes = argv[1:]
-        cluster_name = argv[0]
+    is_rhel6 = utils.is_rhel6()
+    cluster_name = argv[0]
 
-# Verify that all nodes are resolvable otherwise problems may occur
+    # get nodes' addresses
     udpu_rrp = False
-    node_addr_list = []
-    for node in nodes:
+    node_list = []
+    primary_addr_list = []
+    all_addr_list = []
+    for node in argv[1:]:
         addr_list = utils.parse_multiring_node(node)
+        primary_addr_list.append(addr_list[0])
+        all_addr_list.append(addr_list[0])
+        node_options = {
+            "ring0_addr": addr_list[0],
+        }
         if addr_list[1]:
             udpu_rrp = True
-        node_addr_list.extend(addr_list)
-    for node_addr in node_addr_list:
-        if node_addr:
-            try:
-                socket.getaddrinfo(node_addr, None)
-            except socket.error:
-                print "Warning: Unable to resolve hostname: %s" % node_addr
-                failure = True
-
+            all_addr_list.append(addr_list[1])
+            node_options["ring1_addr"] = addr_list[1]
+        node_list.append(node_options)
+    # special case of ring1 address on cman
+    if is_rhel6 and not udpu_rrp and "--addr1" in utils.pcs_options:
+        for node in node_list:
+            node["ring1_addr"] = utils.pcs_options["--addr1"]
+
+    # verify addresses
     if udpu_rrp:
-        for node in nodes:
-            if "," not in node:
-                utils.err("if one node is configured for RRP, all nodes must configured for RRP")
-
-    if failure and "--force" not in utils.pcs_options:
-        utils.err("Unable to resolve all hostnames (use --force to override).")
-
-    transport = "udp" if utils.is_rhel6() else "udpu"
-    if "--transport" in utils.pcs_options:
-        transport = utils.pcs_options["--transport"]
-        if (
-            transport not in ("udp", "udpu")
-            and
-            "--force" not in utils.pcs_options
-        ):
-            utils.err(
-                "unknown transport '%s', use --force to override" % transport
-            )
+        for node_options in node_list:
+            if "ring1_addr" not in node_options:
+                utils.err(
+                    "if one node is configured for RRP, "
+                    + "all nodes must be configured for RRP"
+                )
 
-    if transport == "udpu" and utils.is_rhel6():
-        print("Warning: Using udpu transport on a CMAN cluster, "
-            + "cluster restart is required after node add or remove")
-    if (
-        transport == "udpu"
-        and
-        ("--addr0" in utils.pcs_options or "--addr1" in utils.pcs_options)
-    ):
-        utils.err("--addr0 and --addr1 can only be used with --transport=udp")
+    nodes_unresolvable = False
+    for node_addr in all_addr_list:
+        try:
+            socket.getaddrinfo(node_addr, None)
+        except socket.error:
+            print("Warning: Unable to resolve hostname: {0}".format(node_addr))
+            nodes_unresolvable = True
+    if nodes_unresolvable and "--force" not in utils.pcs_options:
+        utils.err("Unable to resolve all hostnames, use --force to override")
+
+    # parse, validate and complete options
+    if is_rhel6:
+        options, messages = cluster_setup_parse_options_cman(utils.pcs_options)
+    else:
+        options, messages = cluster_setup_parse_options_corosync(
+            utils.pcs_options
+        )
+    if udpu_rrp and "rrp_mode" not in options["transport_options"]:
+        options["transport_options"]["rrp_mode"] = "passive"
+    cluster_setup_print_messages(messages)
+
+    # prepare config file
+    if is_rhel6:
+        config, messages = cluster_setup_create_cluster_conf(
+            cluster_name,
+            node_list,
+            options["transport_options"],
+            options["totem_options"]
+        )
+    else:
+        config, messages = cluster_setup_create_corosync_conf(
+            cluster_name,
+            node_list,
+            options["transport_options"],
+            options["totem_options"],
+            options["quorum_options"]
+        )
+    cluster_setup_print_messages(messages)
+
+    # setup on the local node
+    if "--local" in utils.pcs_options:
+        # Config path can be overriden by --corosync_conf or --cluster_conf
+        # command line options. If it is overriden we do not touch any cluster
+        # which may be set up on the local node.
+        if is_rhel6:
+            config_path = settings.cluster_conf_file
+        else:
+            config_path = settings.corosync_conf_file
+        config_path_overriden = (
+            (is_rhel6 and "--cluster_conf" in utils.pcs_options)
+            or
+            (not is_rhel6 and "--corosync_conf" in utils.pcs_options)
+        )
 
-    rrpmode = None
-    if "--rrpmode" in utils.pcs_options or udpu_rrp or "--addr0" in utils.pcs_options:
-        rrpmode = "passive"
-        if "--rrpmode" in utils.pcs_options:
-            rrpmode = utils.pcs_options["--rrpmode"]
-        if rrpmode == "active" and "--force" not in utils.pcs_options:
-            utils.err("using a RRP mode of 'active' is not supported or tested, use --force to override")
-        elif rrpmode != "passive" and "--force" not in utils.pcs_options:
-            utils.err("%s is an unknown RRP mode, use --force to override" % rrpmode)
-
-    if fedora_config == True:
-        if os.path.exists(settings.corosync_conf_file) and not "--force" in utils.pcs_options:
-            utils.err("%s already exists, use --force to overwrite" % settings.corosync_conf_file)
-        if not ("--corosync_conf" in utils.pcs_options and "--local" in utils.pcs_options):
+        # verify and ensure no cluster is set up on the host
+        if "--force" not in utils.pcs_options and os.path.exists(config_path):
+            utils.err("{0} already exists, use --force to overwrite".format(
+                config_path
+            ))
+        if not config_path_overriden:
             cib_path = os.path.join(settings.cib_dir, "cib.xml")
-            if os.path.exists(cib_path) and not "--force" in utils.pcs_options:
-                utils.err("%s already exists, use --force to overwrite" % cib_path)
-        if "--corosync_conf" not in utils.pcs_options:
+            if "--force" not in utils.pcs_options and os.path.exists(cib_path):
+                utils.err("{0} already exists, use --force to overwrite".format(
+                    cib_path
+                ))
             cluster_destroy([])
 
-        f = open(COROSYNC_CONFIG_FEDORA_TEMPLATE, 'r')
+        # set up the cluster
+        utils.setCorosyncConf(config)
+        if "--start" in utils.pcs_options:
+            start_cluster([])
+        if "--enable" in utils.pcs_options:
+            enable_cluster([])
 
-        corosync_config = f.read()
-        f.close()
+    # setup on remote nodes
+    else:
+        # verify and ensure no cluster is set up on the nodes
+        # checks that nodes are authenticated as well
+        if "--force" not in utils.pcs_options:
+            all_nodes_available = True
+            for node in primary_addr_list:
+                available, message = utils.canAddNodeToCluster(node)
+                if not available:
+                    all_nodes_available = False
+                    utils.err("{0}: {1}".format(node, message), False)
+            if not all_nodes_available:
+                utils.err(
+                    "nodes availability check failed, use --force to override. "
+                    + "WARNING: This will destroy existing cluster on the nodes."
+                )
+        print("Destroying cluster on nodes: {0}...".format(
+            ", ".join(primary_addr_list)
+        ))
+        destroy_cluster(primary_addr_list)
+        print()
+
+        # send local cluster pcsd configs to the new nodes
+        print("Sending cluster config files to the nodes...")
+        pcsd_data = {
+            "nodes": primary_addr_list,
+            "force": True,
+            "clear_local_cluster_permissions": True,
+        }
+        err_msgs = []
+        output, retval = utils.run_pcsdcli("send_local_configs", pcsd_data)
+        if retval == 0 and output["status"] == "ok" and output["data"]:
+            try:
+                for node in primary_addr_list:
+                    node_response = output["data"][node]
+                    if node_response["status"] == "notauthorized":
+                        err_msgs.append(
+                            "Unable to authenticate to " + node
+                            + ", try running 'pcs cluster auth'"
+                        )
+                    if node_response["status"] not in ["ok", "not_supported"]:
+                        err_msgs.append(
+                            "Unable to set pcsd configs on {0}".format(node)
+                        )
+            except:
+                err_msgs.append("Unable to communicate with pcsd")
+        else:
+            err_msgs.append("Unable to set pcsd configs")
+        for err_msg in err_msgs:
+            print("Warning: {0}".format(err_msg))
+
+        # send the cluster config
+        for node in primary_addr_list:
+            utils.setCorosyncConfig(node, config)
+
+        # start and enable the cluster if requested
+        if "--start" in utils.pcs_options:
+            print("\nStarting cluster on nodes: {0}...".format(
+                ", ".join(primary_addr_list)
+            ))
+            start_cluster_nodes(primary_addr_list)
+        if "--enable" in utils.pcs_options:
+            enable_cluster(primary_addr_list)
+
+        # sync certificates as the last step because it restarts pcsd
+        print()
+        pcsd.pcsd_sync_certs([], exit_after_error=False)
+
+def cluster_setup_parse_options_corosync(options):
+    messages = []
+    parsed = {
+        "transport_options": {
+            "rings_options": [],
+        },
+        "totem_options": {},
+        "quorum_options": {},
+    }
+
+    transport = "udpu"
+    if "--transport" in options:
+        transport = options["--transport"]
+        if transport not in ("udp", "udpu"):
+            messages.append({
+                "text": "unknown transport '{0}'".format(transport),
+                "type": "error",
+                "forceable": True,
+            })
+    parsed["transport_options"]["transport"] = transport
+
+    if transport == "udpu" and ("--addr0" in options or "--addr1" in options):
+        messages.append({
+            "text": "--addr0 and --addr1 can only be used with --transport=udp",
+            "type": "error",
+            "forceable": False,
+        })
 
-        i = 1
-        new_nodes_section = ""
-        for node in nodes:
-            new_nodes_section += "  node {\n"
-            if udpu_rrp:
-                new_nodes_section += "        ring0_addr: %s\n" % (node.split(",")[0])
-                new_nodes_section += "        ring1_addr: %s\n" % (node.split(",")[1])
+    rrpmode = None
+    if "--rrpmode" in options or "--addr0" in options:
+        rrpmode = "passive"
+        if "--rrpmode" in options:
+            rrpmode = options["--rrpmode"]
+        if rrpmode not in ("passive", "active"):
+            messages.append({
+                "text": "{0} is an unknown RRP mode".format(rrpmode),
+                "type": "error",
+                "forceable": True,
+            })
+        if rrpmode == "active":
+            messages.append({
+                "text": "using a RRP mode of 'active' is not supported or tested",
+                "type": "error",
+                "forceable": True,
+            })
+    if rrpmode:
+        parsed["transport_options"]["rrp_mode"] = rrpmode
+
+    totem_options_names = {
+        "--token": "token",
+        "--token_coefficient": "token_coefficient",
+        "--join": "join",
+        "--consensus": "consensus",
+        "--miss_count_const": "miss_count_const",
+        "--fail_recv_const": "fail_recv_const",
+    }
+    for opt_name, parsed_name in totem_options_names.items():
+        if opt_name in options:
+            parsed["totem_options"][parsed_name] = options[opt_name]
+
+    if transport == "udp":
+        interface_ids = []
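+        # rings are defined via --addr0/--addr1; per-ring multicast options
+        # default to 239.255.1.1 / 239.255.2.1 and port 5405 (see below)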
+        if "--addr0" in options:
+            interface_ids.append(0)
+            if "--addr1" in options:
+                interface_ids.append(1)
+        for interface in interface_ids:
+            ring_options = {}
+            ring_options["addr"] = options["--addr{0}".format(interface)]
+            if "--broadcast{0}".format(interface) in options:
+                ring_options["broadcast"] = True
             else:
-                new_nodes_section += "        ring0_addr: %s\n" % (node)
-            new_nodes_section += "        nodeid: %d\n" % (i)
-            new_nodes_section += "       }\n"
-            i = i+1
-
-        two_node_section = ""
-        if len(nodes) == 2:
-            two_node_section = "two_node: 1"
-
-        quorum_options = ""
-        if "--wait_for_all" in utils.pcs_options:
-            quorum_options += "wait_for_all: " + utils.pcs_options["--wait_for_all"] + "\n"
-        if "--auto_tie_breaker" in utils.pcs_options:
-            quorum_options += "auto_tie_breaker: " + utils.pcs_options["--auto_tie_breaker"] + "\n"
-        if "--last_man_standing" in utils.pcs_options:
-            quorum_options += "last_man_standing: " + utils.pcs_options["--last_man_standing"] + "\n"
-        if "--last_man_standing_window" in utils.pcs_options:
-            quorum_options += "last_man_standing_window: " + utils.pcs_options["--last_man_standing_window"] + "\n"
-
-        ir = ""
-
-        if rrpmode:
-            ir += "rrp_mode: " + rrpmode + "\n"
-
-        if transport == "udp":
-
-            if "--addr0" in utils.pcs_options:
-                ir += utils.generate_rrp_corosync_config(0)
-
-                if "--addr1" in utils.pcs_options:
-                    ir += utils.generate_rrp_corosync_config(1)
-        if "--ipv6" in utils.pcs_options:
-            ip_version = "ip_version: ipv6\n"
-        else:
-            ip_version = ""
-
-
-        totem_options = ""
-        if "--token" in utils.pcs_options:
-            totem_options += "token: " + utils.pcs_options["--token"] + "\n"
-        if "--token_coefficient" in utils.pcs_options:
-            totem_options += "token_coefficient: " + utils.pcs_options["--token_coefficient"] + "\n"
-        if "--join" in utils.pcs_options:
-            totem_options += "join: " + utils.pcs_options["--join"] + "\n"
-        if "--consensus" in utils.pcs_options:
-            totem_options += "consensus: " + utils.pcs_options["--consensus"] + "\n"
-        if "--miss_count_const" in utils.pcs_options:
-            totem_options += "miss_count_const: " + utils.pcs_options["--miss_count_const"] + "\n"
-        if "--fail_recv_const" in utils.pcs_options:
-            totem_options += "fail_recv_const: " + utils.pcs_options["--fail_recv_const"] + "\n"
-
-        corosync_config = corosync_config.replace("@@nodes", new_nodes_section)
-        corosync_config = corosync_config.replace("@@cluster_name",cluster_name)
-        corosync_config = corosync_config.replace("@@quorum_options\n",quorum_options)
-        corosync_config = corosync_config.replace("@@two_node",two_node_section)
-        corosync_config = corosync_config.replace("@@transport",transport)
-        corosync_config = corosync_config.replace("@@interfaceandrrpmode\n",ir)
-        corosync_config = corosync_config.replace("@@ip_version\n",ip_version)
-        corosync_config = corosync_config.replace("@@totem_options\n",totem_options)
-        if returnConfig:
-            return corosync_config
-
-        utils.setCorosyncConf(corosync_config)
+                if "--mcast{0}".format(interface) in options:
+                    mcastaddr = options["--mcast{0}".format(interface)]
+                else:
+                    mcastaddr = "239.255.{0}.1".format(interface + 1)
+                ring_options["mcastaddr"] = mcastaddr
+                if "--mcastport{0}".format(interface) in options:
+                    mcastport = options["--mcastport{0}".format(interface)]
+                else:
+                    mcastport = "5405"
+                ring_options["mcastport"] = mcastport
+                if "--ttl{0}".format(interface) in options:
+                    ring_options["ttl"] = options["--ttl{0}".format(interface)]
+            parsed["transport_options"]["rings_options"].append(ring_options)
+
+    if "--ipv6" in options:
+        parsed["transport_options"]["ip_version"] = "ipv6"
+
+    quorum_options_names = {
+        "--wait_for_all": "wait_for_all",
+        "--auto_tie_breaker": "auto_tie_breaker",
+        "--last_man_standing": "last_man_standing",
+        "--last_man_standing_window": "last_man_standing_window",
+    }
+    for opt_name, parsed_name in quorum_options_names.items():
+        if opt_name in options:
+            parsed["quorum_options"][parsed_name] = options[opt_name]
+    for opt_name in (
+        "--wait_for_all", "--auto_tie_breaker", "--last_man_standing"
+    ):
+        if opt_name in options and options[opt_name] not in ("0", "1"):
+            messages.append({
+                "text": "'{0}' is not a valid value for {1}, use 0 or 1".format(
+                    options[opt_name], opt_name
+                ),
+                "type": "error",
+                "forceable": False,
+            })
+
+    return parsed, messages
+
+def cluster_setup_parse_options_cman(options):
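+    # CMAN (RHEL 6) variant of the option parsing: broadcast is handled
+    # differently and quorum options are not supported, they only produce
+    # warnings.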
+    messages = []
+    parsed = {
+        "transport_options": {
+            "rings_options": [],
+        },
+        "totem_options": {},
+    }
+
+    broadcast = ("--broadcast0" in options) or ("--broadcast1" in options)
+    if broadcast:
+        transport = "udpb"
+        parsed["transport_options"]["broadcast"] = True
+        ring_missing_broadcast = None
+        if "--broadcast0" not in options:
+            ring_missing_broadcast = "0"
+        if "--broadcast1" not in options:
+            ring_missing_broadcast = "1"
+        if ring_missing_broadcast:
+            messages.append({
+                "text": (
+                    "Enabling broadcast for ring {0} as CMAN does not support "
+                    + "broadcast in only one ring"
+                ).format(ring_missing_broadcast),
+                "type": "warning",
+                "forceable": False,
+            })
     else:
-        broadcast = (
-            ("--broadcast0" in utils.pcs_options)
-            or
-            ("--broadcast1" in utils.pcs_options)
-        )
-        if broadcast:
-            transport = "udpb"
-            if "--broadcast0" not in utils.pcs_options:
-                print("Warning: Enabling broadcast for ring 0 "
-                    + "as CMAN does not support broadcast in only one ring")
-            if "--broadcast1" not in utils.pcs_options:
-                print("Warning: Enabling broadcast for ring 1 "
-                    + "as CMAN does not support broadcast in only one ring")
-
-        cluster_conf_location = settings.cluster_conf_file
-        if returnConfig:
-            cc_temp = tempfile.NamedTemporaryFile('w+b', -1, ".pcs")
-            cluster_conf_location = cc_temp.name
-
-        if os.path.exists(cluster_conf_location) and not "--force" in utils.pcs_options and not returnConfig:
-            print "Error: %s already exists, use --force to overwrite" % cluster_conf_location
-            sys.exit(1)
-        output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--createcluster", cluster_name])
-        if retval != 0:
-            print output
-            utils.err("error creating cluster: %s" % cluster_name)
-        output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--addfencedev", "pcmk-redirect", "agent=fence_pcmk"])
-        if retval != 0:
-            print output
-            utils.err("error creating fence dev: %s" % cluster_name)
-
-        cman_opts = []
-        cman_opts.append("transport=" + transport)
-        cman_opts.append("broadcast=" + ("yes" if broadcast else "no"))
-        if len(nodes) == 2:
-            cman_opts.append("two_node=1")
-            cman_opts.append("expected_votes=1")
-        output, retval = utils.run(
-            ["/usr/sbin/ccs", "-f", cluster_conf_location, "--setcman"]
-            + cman_opts
-        )
-        if retval != 0:
-            print output
-            utils.err("error setting cman options")
+        transport = "udp"
+        if "--transport" in options:
+            transport = options["--transport"]
+            if transport not in ("udp", "udpu"):
+                messages.append({
+                    "text": "unknown transport '{0}'".format(transport),
+                    "type": "error",
+                    "forceable": True,
+                })
+    parsed["transport_options"]["transport"] = transport
+
+    if transport == "udpu":
+        messages.append({
+            "text": (
+                "Using udpu transport on a CMAN cluster, "
+                + "cluster restart is required after node add or remove"
+            ),
+            "type": "warning",
+            "forceable": False,
+        })
+    if transport == "udpu" and ("--addr0" in options or "--addr1" in options):
+        messages.append({
+            "text": "--addr0 and --addr1 can only be used with --transport=udp",
+            "type": "error",
+            "forceable": False,
+        })
 
-        for node in nodes:
-            if udpu_rrp:
-                node0, node1 = node.split(",")
-            elif "--addr1" in utils.pcs_options:
-                node0 = node
-                node1 = utils.pcs_options["--addr1"]
+    rrpmode = None
+    if "--rrpmode" in options or "--addr0" in options:
+        rrpmode = "passive"
+        if "--rrpmode" in options:
+            rrpmode = options["--rrpmode"]
+        if rrpmode not in ("passive", "active"):
+            messages.append({
+                "text": "{0} is an unknown RRP mode".format(rrpmode),
+                "type": "error",
+                "forceable": True,
+            })
+        if rrpmode == "active":
+            messages.append({
+                "text": "using a RRP mode of 'active' is not supported or tested",
+                "type": "error",
+                "forceable": True,
+            })
+    if rrpmode:
+        parsed["transport_options"]["rrp_mode"] = rrpmode
+
+    totem_options_names = {
+        "--token": "token",
+        "--join": "join",
+        "--consensus": "consensus",
+        "--miss_count_const": "miss_count_const",
+        "--fail_recv_const": "fail_recv_const",
+    }
+    for opt_name, parsed_name in totem_options_names.items():
+        if opt_name in options:
+            parsed["totem_options"][parsed_name] = options[opt_name]
+
+    if not broadcast:
+        for interface in (0, 1):
+            if "--addr{0}".format(interface) not in options:
+                continue
+            ring_options = {}
+            if "--mcast{0}".format(interface) in options:
+                mcastaddr = options["--mcast{0}".format(interface)]
             else:
-                node0 = node
-                node1 = None
-            output, retval = utils.run(["/usr/sbin/ccs", "-f", cluster_conf_location, "--addnode", node0])
-            if retval != 0:
-                print output
-                utils.err("error adding node: %s" % node0)
-            if node1:
-                output, retval = utils.run([
-                    "/usr/sbin/ccs", "-f", cluster_conf_location,
-                    "--addalt", node0, node1
-                ])
-                if retval != 0:
-                    print output
-                    utils.err(
-                        "error adding alternative address for node: %s" % node0
+                mcastaddr = "239.255.{0}.1".format(interface + 1)
+            ring_options["mcastaddr"] = mcastaddr
+            if "--mcastport{0}".format(interface) in options:
+                ring_options["mcastport"] = options[
+                    "--mcastport{0}".format(interface)
+                ]
+            if "--ttl{0}".format(interface) in options:
+                ring_options["ttl"] = options["--ttl{0}".format(interface)]
+            parsed["transport_options"]["rings_options"].append(ring_options)
+
+    ignored_options_names = (
+        "--wait_for_all",
+        "--auto_tie_breaker",
+        "--last_man_standing",
+        "--last_man_standing_window",
+        "--token_coefficient",
+        "--ipv6",
+    )
+    for opt_name in ignored_options_names:
+        if opt_name in options:
+            text = "{0} ignored as it is not supported on CMAN clusters".format(
+                opt_name
+            )
+            messages.append({
+                "text": text,
+                "type": "warning",
+                "forceable": False,
+            })
+
+    return parsed, messages
+
+def cluster_setup_create_corosync_conf(
+    cluster_name, node_list, transport_options, totem_options, quorum_options
+):
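+    # Build corosync.conf from the parsed options using
+    # corosync_conf_utils.Section objects; returns the config as a string
+    # plus a list of messages.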
+    messages = []
+
+    corosync_conf = corosync_conf_utils.Section("")
+    totem_section = corosync_conf_utils.Section("totem")
+    nodelist_section = corosync_conf_utils.Section("nodelist")
+    quorum_section = corosync_conf_utils.Section("quorum")
+    logging_section = corosync_conf_utils.Section("logging")
+    corosync_conf.add_section(totem_section)
+    corosync_conf.add_section(nodelist_section)
+    corosync_conf.add_section(quorum_section)
+    corosync_conf.add_section(logging_section)
+
+    totem_section.add_attribute("version", "2")
+    totem_section.add_attribute("secauth", "off")
+    totem_section.add_attribute("cluster_name", cluster_name)
+
+    transport_options_names = (
+        "transport",
+        "rrp_mode",
+        "ip_version",
+    )
+    for opt_name in transport_options_names:
+        if opt_name in transport_options:
+            totem_section.add_attribute(opt_name, transport_options[opt_name])
+
+    totem_options_names = (
+        "token",
+        "token_coefficient",
+        "join",
+        "consensus",
+        "miss_count_const",
+        "fail_recv_const",
+    )
+    for opt_name in totem_options_names:
+        if opt_name in totem_options:
+            totem_section.add_attribute(opt_name, totem_options[opt_name])
+
+    transport = None
+    if "transport" in transport_options:
+        transport = transport_options["transport"]
+    if transport == "udp":
+        if "rings_options" in transport_options:
+            for ring_number, ring_options in enumerate(
+                transport_options["rings_options"]
+            ):
+                interface_section = corosync_conf_utils.Section("interface")
+                totem_section.add_section(interface_section)
+                interface_section.add_attribute("ringnumber", ring_number)
+                if "addr" in ring_options:
+                    interface_section.add_attribute(
+                        "bindnetaddr", ring_options["addr"]
                     )
-            output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--addmethod", "pcmk-method", node0])
-            if retval != 0:
-                print output
-                utils.err("error adding fence method: %s" % node0)
-            output, retval = utils.run(["/usr/sbin/ccs", "-i", "-f", cluster_conf_location, "--addfenceinst", "pcmk-redirect", node0, "pcmk-method", "port="+node0])
-            if retval != 0:
-                print output
-                utils.err("error adding fence instance: %s" % node0)
+                if "broadcast" in ring_options and ring_options["broadcast"]:
+                    interface_section.add_attribute("broadcast", "yes")
+                else:
+                    for opt_name in ("mcastaddr", "mcastport", "ttl"):
+                        if opt_name in ring_options:
+                            interface_section.add_attribute(
+                                opt_name,
+                                ring_options[opt_name]
+                            )
+
+    for node_id, node_options in enumerate(node_list, 1):
+        node_section = corosync_conf_utils.Section("node")
+        nodelist_section.add_section(node_section)
+        for opt_name in ("ring0_addr", "ring1_addr"):
+            if opt_name in node_options:
+                node_section.add_attribute(opt_name, node_options[opt_name])
+        node_section.add_attribute("nodeid", node_id)
+
+    quorum_section.add_attribute("provider", "corosync_votequorum")
+    quorum_options_names = (
+        "wait_for_all",
+        "auto_tie_breaker",
+        "last_man_standing",
+        "last_man_standing_window",
+    )
+    for opt_name in quorum_options_names:
+        if opt_name in quorum_options:
+            quorum_section.add_attribute(opt_name, quorum_options[opt_name])
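+    # "two_node: 1" is set only for a 2-node cluster without
+    # auto_tie_breaker enabled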
+    auto_tie_breaker = (
+        "auto_tie_breaker" in quorum_options
+        and
+        quorum_options["auto_tie_breaker"] == "1"
+    )
+    if len(node_list) == 2 and not auto_tie_breaker:
+        quorum_section.add_attribute("two_node", "1")
 
-        if not broadcast:
-            for interface in ("0", "1"):
-                if "--addr" + interface not in utils.pcs_options:
-                    continue
-                mcastaddr = "239.255.1.1" if interface == "0" else "239.255.2.1"
+    logging_section.add_attribute("to_logfile", "yes")
+    logging_section.add_attribute("logfile", "/var/log/cluster/corosync.log")
+    logging_section.add_attribute("to_syslog", "yes")
+
+    return str(corosync_conf), messages
+
+def cluster_setup_create_cluster_conf(
+    cluster_name, node_list, transport_options, totem_options
+):
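+    # Build cluster.conf by running a series of ccs commands against a
+    # temporary file; returns the resulting config as a string plus a list
+    # of messages.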
+    broadcast = (
+        "broadcast" in transport_options
+        and
+        transport_options["broadcast"]
+    )
+
+    commands = []
+    commands.append({
+        "cmd": ["-i", "--createcluster", cluster_name],
+        "err": "error creating cluster: {0}".format(cluster_name),
+    })
+    commands.append({
+        "cmd": ["-i", "--addfencedev", "pcmk-redirect", "agent=fence_pcmk"],
+        "err": "error creating fence dev: {0}".format(cluster_name),
+    })
+
+    cman_opts = []
+    if "transport" in transport_options:
+        cman_opts.append("transport=" + transport_options["transport"])
+    cman_opts.append("broadcast=" + ("yes" if broadcast else "no"))
+    if len(node_list) == 2:
+        cman_opts.append("two_node=1")
+        cman_opts.append("expected_votes=1")
+    commands.append({
+        "cmd": ["--setcman"] + cman_opts,
+        "err": "error setting cman options",
+    })
+
+    for node_options in node_list:
+        if "ring0_addr" in node_options:
+            ring0_addr = node_options["ring0_addr"]
+            commands.append({
+                "cmd": ["--addnode", ring0_addr],
+                "err": "error adding node: {0}".format(ring0_addr),
+            })
+            if "ring1_addr" in node_options:
+                ring1_addr = node_options["ring1_addr"]
+                commands.append({
+                    "cmd": ["--addalt", ring0_addr, ring1_addr],
+                    "err": (
+                        "error adding alternative address for node: {0}".format(
+                            ring0_addr
+                        )
+                    ),
+                })
+            commands.append({
+                "cmd": ["-i", "--addmethod", "pcmk-method", ring0_addr],
+                "err": "error adding fence method: {0}".format(ring0_addr),
+            })
+            commands.append({
+                "cmd": [
+                    "-i", "--addfenceinst", "pcmk-redirect", ring0_addr,
+                    "pcmk-method", "port=" + ring0_addr
+                ],
+                "err": "error adding fence instance: {0}".format(ring0_addr),
+            })
+
+    if not broadcast:
+        if "rings_options" in transport_options:
+            for ring_number, ring_options in enumerate(
+                transport_options["rings_options"]
+            ):
                 mcast_options = []
-                if "--mcast" + interface in utils.pcs_options:
-                    mcastaddr = utils.pcs_options["--mcast" + interface]
-                mcast_options.append(mcastaddr)
-                if "--mcastport" + interface in utils.pcs_options:
-                    mcast_options.append(
-                        "port=" + utils.pcs_options["--mcastport" + interface]
-                    )
-                if "--ttl" + interface in utils.pcs_options:
-                    mcast_options.append(
-                        "ttl=" + utils.pcs_options["--ttl" + interface]
-                    )
-                output, retval = utils.run(
-                    ["/usr/sbin/ccs", "-f", cluster_conf_location,
-                    "--setmulticast" if interface == "0" else "--setaltmulticast"]
-                    + mcast_options
-                )
-                if retval != 0:
-                    print output
-                    utils.err("error adding ring%s settings" % interface)
-
-        totem_options = []
-        if "--token" in utils.pcs_options:
-            totem_options.append("token=" + utils.pcs_options["--token"])
-        if "--join" in utils.pcs_options:
-            totem_options.append("join=" + utils.pcs_options["--join"])
-        if "--consensus" in utils.pcs_options:
-            totem_options.append(
-                "consensus=" + utils.pcs_options["--consensus"]
-            )
-        if "--miss_count_const" in utils.pcs_options:
-            totem_options.append(
-                "miss_count_const=" + utils.pcs_options["--miss_count_const"]
-            )
-        if "--fail_recv_const" in utils.pcs_options:
-            totem_options.append(
-                "fail_recv_const=" + utils.pcs_options["--fail_recv_const"]
-            )
-        if rrpmode:
-            totem_options.append("rrp_mode=" + rrpmode)
-        if totem_options:
-            output, retval = utils.run(
-                ["/usr/sbin/ccs", "-f", cluster_conf_location, "--settotem"]
-                + totem_options
+                if "mcastaddr" in ring_options:
+                    mcast_options.append(ring_options["mcastaddr"])
+                if "mcastport" in ring_options:
+                    mcast_options.append("port=" + ring_options["mcastport"])
+                if "ttl" in ring_options:
+                    mcast_options.append("ttl=" + ring_options["ttl"])
+                if ring_number == 0:
+                    cmd_name = "--setmulticast"
+                else:
+                    cmd_name = "--setaltmulticast"
+                commands.append({
+                    "cmd": [cmd_name] + mcast_options,
+                    "err": "error adding ring{0} settings".format(ring_number),
+                })
+
+    totem_options_names = (
+        "token",
+        "join",
+        "consensus",
+        "miss_count_const",
+        "fail_recv_const",
+    )
+    totem_cmd_options = []
+    for opt_name in totem_options_names:
+        if opt_name in totem_options:
+            totem_cmd_options.append(
+                "{0}={1}".format(opt_name, totem_options[opt_name])
             )
-            if retval != 0:
-                print output
-                utils.err("error setting totem options")
-
-        if "--wait_for_all" in utils.pcs_options:
-            print "Warning: --wait_for_all"\
-                " ignored as it is not supported on CMAN clusters"
-        if "--auto_tie_breaker" in utils.pcs_options:
-            print "Warning: --auto_tie_breaker"\
-                " ignored as it is not supported on CMAN clusters"
-        if "--last_man_standing" in utils.pcs_options:
-            print "Warning: --last_man_standing"\
-                " ignored as it is not supported on CMAN clusters"
-        if "--last_man_standing_window" in utils.pcs_options:
-            print "Warning: --last_man_standing_window"\
-                " ignored as it is not supported on CMAN clusters"
-        if "--token_coefficient" in utils.pcs_options:
-            print "Warning: --token_coefficient"\
-                " ignored as it is not supported on CMAN clusters"
-        if "--ipv6" in utils.pcs_options:
-            print "Warning: --ipv6"\
-                " ignored as it is not supported on CMAN clusters"
-
-        if returnConfig:
-            cc_temp.seek(0)
-            cluster_conf_data = cc_temp.read()
-            cc_temp.close()
-            return cluster_conf_data
-
-
-    if "--start" in utils.pcs_options:
-        start_cluster([])
-    if "--enable" in utils.pcs_options:
-        enable_cluster([])
+    if "rrp_mode" in transport_options:
+        totem_cmd_options.append(
+            "rrp_mode={0}".format(transport_options["rrp_mode"])
+        )
+    if totem_cmd_options:
+        commands.append({
+            "cmd": ["--settotem"] + totem_cmd_options,
+            "err": "error setting totem options",
+        })
+
+    messages = []
+    conf_temp = tempfile.NamedTemporaryFile(mode="w+", suffix=".pcs")
+    conf_path = conf_temp.name
+    cmd_prefix = ["ccs", "-f", conf_path]
+    for cmd_item in commands:
+        output, retval = utils.run(cmd_prefix + cmd_item["cmd"])
+        if retval != 0:
+            if output:
+                messages.append({
+                    "text": output,
+                    "type": "plain",
+                })
+            messages.append({
+                "text": cmd_item["err"],
+                "type": "error",
+                "forceable": False,
+            })
+            conf_temp.close()
+            return "", messages
+    conf_temp.seek(0)
+    cluster_conf = conf_temp.read()
+    conf_temp.close()
+    return cluster_conf, messages
+
+def cluster_setup_print_messages(messages):
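+    # Report collected messages: warnings are printed, forceable errors
+    # become warnings when --force is given, any remaining error makes the
+    # command exit with an error status.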
+    critical_error = False
+    for msg in messages:
+        if msg["type"] == "error":
+            if msg["forceable"] and "--force" in utils.pcs_options:
+                # Let the user know what may be wrong even when --force is used,
+                # as --force may be used to override an early error which
+                # would otherwise hide later errors.
+                print("Warning: " + msg["text"])
+                continue
+            text = msg["text"]
+            if msg["forceable"]:
+                text += ", use --force to override"
+            critical_error = True
+            utils.err(text, False)
+        elif msg["type"] == "warning":
+            print("Warning: " + msg["text"])
+        else:
+            print(msg["text"])
+    if critical_error:
+        sys.exit(1)
 
 def get_local_network():
     args = ["/sbin/ip", "route"]
@@ -649,26 +981,26 @@ def start_cluster(argv):
         start_cluster_nodes(argv)
         return
 
-    print "Starting Cluster..."
+    print("Starting Cluster...")
     if utils.is_rhel6():
 #   Verify that CMAN_QUORUM_TIMEOUT is set, if not, then we set it to 0
-        retval, output = commands.getstatusoutput('source /etc/sysconfig/cman ; [ -z "$CMAN_QUORUM_TIMEOUT" ]')
+        retval, output = getstatusoutput('source /etc/sysconfig/cman ; [ -z "$CMAN_QUORUM_TIMEOUT" ]')
         if retval == 0:
             with open("/etc/sysconfig/cman", "a") as cman_conf_file:
                 cman_conf_file.write("\nCMAN_QUORUM_TIMEOUT=0\n")
 
         output, retval = utils.run(["service", "cman","start"])
         if retval != 0:
-            print output
+            print(output)
             utils.err("unable to start cman")
     else:
         output, retval = utils.run(["service", "corosync","start"])
         if retval != 0:
-            print output
+            print(output)
             utils.err("unable to start corosync")
     output, retval = utils.run(["service", "pacemaker", "start"])
     if retval != 0:
-        print output
+        print(output)
         utils.err("unable to start pacemaker")
 
 def start_cluster_all():
@@ -748,28 +1080,29 @@ def stop_cluster_nodes(nodes):
         utils.err("unable to stop all nodes\n" + "\n".join(error_list))
 
 def node_standby(argv,standby=True):
-    # If we didn't specify any arguments, use the current node name
-    if len(argv) == 0 and "--all" not in utils.pcs_options:
-        p = subprocess.Popen(["uname","-n"], stdout=subprocess.PIPE)
-        cur_node = p.stdout.readline().rstrip()
-        argv = [cur_node]
+    if len(argv) > 1:
+        if standby:
+            usage.cluster(["standby"])
+        else:
+            usage.cluster(["unstandby"])
+        sys.exit(1)
 
     nodes = utils.getNodesFromPacemaker()
 
     if "--all" not in utils.pcs_options:
-        nodeFound = False
-        for node in nodes:
-            if node == argv[0]:
-                nodeFound = True
-                break
-
-        if not nodeFound:
-            utils.err("node '%s' does not appear to exist in configuration" % argv[0])
-
+        options_node = []
+        if argv:
+            if argv[0] not in nodes:
+                utils.err(
+                    "node '%s' does not appear to exist in configuration"
+                    % argv[0]
+                )
+            else:
+                options_node = ["-N", argv[0]]
         if standby:
-            utils.run(["crm_standby", "-v", "on", "-N", node])
+            utils.run(["crm_standby", "-v", "on"] + options_node)
         else:
-            utils.run(["crm_standby", "-D", "-N", node])
+            utils.run(["crm_standby", "-D"] + options_node)
     else:
         for node in nodes:
             if standby:
@@ -869,24 +1202,24 @@ def stop_cluster(argv):
         stop_cluster_corosync()
 
 def stop_cluster_pacemaker():
-    print "Stopping Cluster (pacemaker)...",
+    print("Stopping Cluster (pacemaker)...")
     output, retval = utils.run(["service", "pacemaker","stop"])
     if retval != 0:
-        print output,
+        print(output)
         utils.err("unable to stop pacemaker")
 
 def stop_cluster_corosync():
     if utils.is_rhel6():
-        print "Stopping Cluster (cman)...",
+        print("Stopping Cluster (cman)...")
         output, retval = utils.run(["service", "cman","stop"])
         if retval != 0:
-            print output,
+            print(output)
             utils.err("unable to stop cman")
     else:
-        print "Stopping Cluster (corosync)...",
+        print("Stopping Cluster (corosync)...")
         output, retval = utils.run(["service", "corosync","stop"])
         if retval != 0:
-            print output,
+            print(output)
             utils.err("unable to stop corosync")
 
 def kill_cluster(argv):
@@ -940,13 +1273,13 @@ def cluster_push(argv):
     if retval != 0:
         utils.err("unable to push cib\n" + output)
     else:
-        print "CIB updated"
+        print("CIB updated")
 
 def cluster_upgrade():
     output, retval = utils.run(["cibadmin", "--upgrade", "--force"])
     if retval != 0:
         utils.err("unable to upgrade cluster: %s" % output)
-    print "Cluster CIB has been upgraded to latest version"
+    print("Cluster CIB has been upgraded to latest version")
 
 def cluster_edit(argv):
     if 'EDITOR' in os.environ:
@@ -978,7 +1311,7 @@ def cluster_edit(argv):
             scope_arg = ""
 
         editor = os.environ['EDITOR']
-        tempcib = tempfile.NamedTemporaryFile('w+b',-1,".pcs")
+        tempcib = tempfile.NamedTemporaryFile(mode="w+", suffix=".pcs")
         cib = utils.get_cib(scope)
         tempcib.write(cib)
         tempcib.flush()
@@ -990,9 +1323,9 @@ def cluster_edit(argv):
         tempcib.seek(0)
         newcib = "".join(tempcib.readlines())
         if newcib == cib:
-            print "CIB not updated, no changes detected"
+            print("CIB not updated, no changes detected")
         else:
-            cluster_push(filter(None, [tempcib.name, scope_arg]))
+            cluster_push([arg for arg in [tempcib.name, scope_arg] if arg])
 
     else:
         utils.err("$EDITOR environment variable is not set")
@@ -1021,7 +1354,7 @@ def get_cib(argv):
         scope = "configuration"
 
     if not filename:
-        print utils.get_cib(scope),
+        print(utils.get_cib(scope), end="")
     else:
         try:
             f = open(filename, 'w')
@@ -1059,6 +1392,8 @@ def cluster_node(argv):
             "%s is not yet authenticated (try pcs cluster auth %s)"
             % (node0, node0)
         )
+    elif status != 0:
+        utils.err(output)
 
     if add_node == True:
         need_ring1_address = utils.need_ring1_address(utils.getCorosyncConf())
@@ -1080,18 +1415,49 @@ def cluster_node(argv):
         for my_node in utils.getNodesFromCorosyncConf():
             retval, output = utils.addLocalNode(my_node, node0, node1)
             if retval != 0:
-                print >> sys.stderr, "Error: unable to add %s on %s - %s" % (node0, my_node, output.strip())
+                utils.err(
+                    "unable to add %s on %s - %s" % (node0, my_node, output.strip()),
+                    False
+                )
             else:
-                print "%s: Corosync updated" % my_node
+                print("%s: Corosync updated" % my_node)
                 corosync_conf = output
         if corosync_conf != None:
+            # send the local cluster's pcsd configs to the new node;
+            # this may be used for sending the corosync config as well in the future
+            pcsd_data = {
+                'nodes': [node0],
+                'force': True,
+            }
+            output, retval = utils.run_pcsdcli('send_local_configs', pcsd_data)
+            if retval != 0:
+                utils.err("Unable to set pcsd configs")
+            if output['status'] == 'notauthorized':
+                utils.err(
+                    "Unable to authenticate to " + node0
+                    + ", try running 'pcs cluster auth'"
+                )
+            if output['status'] == 'ok' and output['data']:
+                try:
+                    node_response = output['data'][node0]
+                    if node_response['status'] not in ['ok', 'not_supported']:
+                        utils.err("Unable to set pcsd configs")
+                except:
+                    utils.err('Unable to communicate with pcsd')
+
             utils.setCorosyncConfig(node0, corosync_conf)
             if "--enable" in utils.pcs_options:
-                utils.enableCluster(node0)
+                retval, err = utils.enableCluster(node0)
+                if retval != 0:
+                    print("Warning: enable cluster - {0}".format(err))
             if "--start" in utils.pcs_options or utils.is_rhel6():
                 # always start new node on cman cluster
                 # otherwise it will get fenced
-                utils.startCluster(node0)
+                retval, err = utils.startCluster(node0)
+                if retval != 0:
+                    print("Warning: start cluster - {0}".format(err))
+
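+            # sync pcsd certificates with the new node as the last step,
+            # because it restarts pcsd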
+            pcsd.pcsd_sync_certs([node0], exit_after_error=False)
         else:
             utils.err("Unable to update any nodes")
         output, retval = utils.reloadCorosync()
@@ -1099,6 +1465,41 @@ def cluster_node(argv):
             print("Warning: Using udpu transport on a CMAN cluster, "
                 + "cluster restart is required to apply node addition")
     else:
+        if node0 not in utils.getNodesFromCorosyncConf():
+            utils.err(
+                "node '%s' does not appear to exist in configuration" % node0
+            )
+        if not "--force" in utils.pcs_options:
+            retval, data = utils.get_remote_quorumtool_output(node0)
+            if retval != 0:
+                utils.err(
+                    "Unable to determine whether removing the node will cause "
+                    + "a loss of the quorum, use --force to override\n"
+                    + data
+                )
+            # we are sure whether we are on a cman cluster or not, because
+            # only nodes from the local cluster can be stopped (see the node
+            # validation above)
+            if utils.is_rhel6():
+                quorum_info = utils.parse_cman_quorum_info(data)
+            else:
+                quorum_info = utils.parse_quorumtool_output(data)
+            if quorum_info:
+                if utils.is_node_stop_cause_quorum_loss(
+                    quorum_info, local=False, node_list=[node0]
+                ):
+                    utils.err(
+                        "Removing the node will cause a loss of the quorum"
+                        + ", use --force to override"
+                    )
+            elif not utils.is_node_offline_by_quorumtool_output(data):
+                utils.err(
+                    "Unable to determine whether removing the node will cause "
+                    + "a loss of the quorum, use --force to override\n"
+                    + data
+                )
+            # else the node seems to be stopped already, we're ok to proceed
+
         nodesRemoved = False
         c_nodes = utils.getNodesFromCorosyncConf()
         destroy_cluster([node0])
@@ -1107,13 +1508,19 @@ def cluster_node(argv):
                 continue
             retval, output = utils.removeLocalNode(my_node, node0)
             if retval != 0:
-                print >> sys.stderr, "Error: unable to remove %s on %s - %s" % (node0,my_node,output.strip())
+                utils.err(
+                    "unable to remove %s on %s - %s" % (node0,my_node,output.strip()),
+                    False
+                )
             else:
                 if output[0] == 0:
-                    print "%s: Corosync updated" % my_node
+                    print("%s: Corosync updated" % my_node)
                     nodesRemoved = True
                 else:
-                    print >> sys.stderr, "%s: Error executing command occured: %s" % (my_node, "".join(output[1]))
+                    utils.err(
+                        "%s: Error executing command occurred: %s" % (my_node, "".join(output[1])),
+                        False
+                    )
         if nodesRemoved == False:
             utils.err("Unable to update any nodes")
 
@@ -1135,7 +1542,7 @@ def cluster_localnode(argv):
             success = utils.addNodeToClusterConf(node)
 
         if success:
-            print "%s: successfully added!" % node
+            print("%s: successfully added!" % node)
         else:
             utils.err("unable to add %s" % node)
     elif argv[0] in ["remove","delete"]:
@@ -1146,7 +1553,7 @@ def cluster_localnode(argv):
             success = utils.removeNodeFromClusterConf(node)
 
         if success:
-            print "%s: successfully removed!" % node
+            print("%s: successfully removed!" % node)
         else:
             utils.err("unable to remove %s" % node)
     else:
@@ -1159,16 +1566,16 @@ def cluster_uidgid_rhel6(argv, silent_list = False):
 
     if len(argv) == 0:
         found = False
-        output, retval = utils.run(["/usr/sbin/ccs", "-f", settings.cluster_conf_file, "--lsmisc"])
+        output, retval = utils.run(["ccs", "-f", settings.cluster_conf_file, "--lsmisc"])
         if retval != 0:
             utils.err("error running ccs\n" + output)
         lines = output.split('\n')
         for line in lines:
             if line.startswith('UID/GID: '):
-                print line
+                print(line)
                 found = True
         if not found and not silent_list:
-            print "No uidgids configured in cluster.conf"
+            print("No uidgids configured in cluster.conf")
         return
     
     command = argv.pop(0)
@@ -1191,11 +1598,11 @@ def cluster_uidgid_rhel6(argv, silent_list = False):
             utils.err("you must set either uid or gid")
 
         if command == "add":
-            output, retval = utils.run(["/usr/sbin/ccs", "-f", settings.cluster_conf_file, "--setuidgid", "uid="+uid, "gid="+gid])
+            output, retval = utils.run(["ccs", "-f", settings.cluster_conf_file, "--setuidgid", "uid="+uid, "gid="+gid])
             if retval != 0:
                 utils.err("unable to add uidgid\n" + output.rstrip())
         elif command == "rm":
-            output, retval = utils.run(["/usr/sbin/ccs", "-f", settings.cluster_conf_file, "--rmuidgid", "uid="+uid, "gid="+gid])
+            output, retval = utils.run(["ccs", "-f", settings.cluster_conf_file, "--rmuidgid", "uid="+uid, "gid="+gid])
             if retval != 0:
                 utils.err("unable to remove uidgid\n" + output.rstrip())
 
@@ -1225,10 +1632,10 @@ def cluster_uidgid(argv, silent_list = False):
                 if "gid" in uid_gid_dict:
                     line += uid_gid_dict["gid"]
 
-                print line
+                print(line)
                 found = True
         if not found and not silent_list:
-            print "No uidgids configured in cluster.conf"
+            print("No uidgids configured in cluster.conf")
         return
 
     command = argv.pop(0)
@@ -1271,7 +1678,7 @@ def cluster_get_corosync_conf(argv):
         exit(1)
 
     if len(argv) == 0:
-        print utils.getCorosyncConf()
+        print(utils.getCorosyncConf(), end="")
         return
 
     node = argv[0]
@@ -1279,7 +1686,7 @@ def cluster_get_corosync_conf(argv):
     if retval != 0:
         utils.err(output)
     else:
-        print output
+        print(output, end="")
 
 def cluster_reload(argv):
     if len(argv) != 1 or argv[0] != "corosync":
@@ -1289,7 +1696,7 @@ def cluster_reload(argv):
     output, retval = utils.reloadCorosync()
     if retval != 0 or "invalid option" in output:
         utils.err(output.rstrip())
-    print "Corosync reloaded"
+    print("Corosync reloaded")
 
 # Completely tear down the cluster & remove config files
 # Code taken from cluster-clean script in pacemaker
@@ -1297,14 +1704,14 @@ def cluster_destroy(argv):
     if "--all" in utils.pcs_options:
         destroy_cluster(utils.getNodesFromCorosyncConf())
     else:
-        print "Shutting down pacemaker/corosync services..."
+        print("Shutting down pacemaker/corosync services...")
         os.system("service pacemaker stop")
         os.system("service corosync stop")
-        print "Killing any remaining services..."
+        print("Killing any remaining services...")
         os.system("killall -q -9 corosync aisexec heartbeat pacemakerd ccm stonithd ha_logd lrmd crmd pengine attrd pingd mgmtd cib fenced dlm_controld gfs_controld")
         utils.disableServices()
 
-        print "Removing all cluster configuration files..."
+        print("Removing all cluster configuration files...")
         if utils.is_rhel6():
             os.system("rm -f /etc/cluster/cluster.conf")
         else:
@@ -1334,7 +1741,7 @@ def cluster_verify(argv):
     output, retval = utils.run([settings.crm_verify] + options)
 
     if output != "":
-        print output
+        print(output)
     stonith.stonith_level_verify()
     return retval
 
@@ -1351,7 +1758,7 @@ def cluster_report(argv):
         else:
             try:
                 os.remove(dest_outfile)
-            except OSError, e:
+            except OSError as e:
                 utils.err("Unable to remove " + dest_outfile + ": " + e.strerror)
     crm_report_opts = []
 
@@ -1382,7 +1789,7 @@ def cluster_report(argv):
         newoutput = newoutput + line + "\n"
     if retval != 0:
         utils.err(newoutput)
-    print newoutput
+    print(newoutput)
 
 def cluster_remote_node(argv):
     if len(argv) < 1:
@@ -1396,7 +1803,7 @@ def cluster_remote_node(argv):
             sys.exit(1)
         hostname = argv.pop(0)
         rsc = argv.pop(0)
-        if not utils.is_resource(rsc):
+        if not utils.dom_get_resource(utils.get_cib_dom(), rsc):
             utils.err("unable to find resource '%s'" % rsc)
         resource.resource_update(rsc, ["meta", "remote-node="+hostname] + argv)
 
@@ -1456,7 +1863,7 @@ def cluster_quorum_unblock(argv):
     )
     if retval != 0:
         utils.err("unable to cancel waiting for nodes")
-    print "Quorum unblocked"
+    print("Quorum unblocked")
 
     startup_fencing = prop.get_set_properties().get("startup-fencing", "")
     utils.set_cib_property(
@@ -1464,7 +1871,7 @@ def cluster_quorum_unblock(argv):
         "false" if startup_fencing.lower() != "false" else "true"
     )
     utils.set_cib_property("startup-fencing", startup_fencing)
-    print "Waiting for nodes cancelled"
+    print("Waiting for nodes cancelled")
 
 class NodeActionThread(threading.Thread):
     def __init__(self, node):
diff --git a/pcs/config.py b/pcs/config.py
index 1be7d8c..1cd32f5 100644
--- a/pcs/config.py
+++ b/pcs/config.py
@@ -1,8 +1,14 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import sys
 import os
+import os.path
 import re
 import datetime
-import cStringIO
+from io import BytesIO
 import tarfile
 import json
 from xml.dom.minidom import parse
@@ -30,6 +36,7 @@ import status
 import stonith
 import usage
 
+
 def config_cmd(argv):
     if len(argv) == 0:
         config_show(argv)
@@ -56,39 +63,49 @@ def config_cmd(argv):
             sys.exit(1)
     elif sub_cmd == "import-cman":
         config_import_cman(argv)
+    elif sub_cmd == "export":
+        if not argv:
+            usage.config(["export"])
+            sys.exit(1)
+        elif argv[0] == "pcs-commands":
+            config_export_pcs_commands(argv[1:])
+        elif argv[0] == "pcs-commands-verbose":
+            config_export_pcs_commands(argv[1:], True)
+        else:
+            usage.config(["export"])
+            sys.exit(1)
     else:
         usage.config()
         sys.exit(1)
 
 def config_show(argv):
-    print "Cluster Name: %s" % utils.getClusterName()
+    print("Cluster Name: %s" % utils.getClusterName())
     status.nodes_status(["config"])
-    print ""
-    print ""
+    print()
     config_show_cib()
     cluster.cluster_uidgid([], True)
 
 def config_show_cib():
-    print "Resources: "
+    print("Resources:")
     utils.pcs_options["--all"] = 1
     utils.pcs_options["--full"] = 1
     resource.resource_show([])
-    print ""
-    print "Stonith Devices: "
+    print()
+    print("Stonith Devices:")
     resource.resource_show([], True)
-    print "Fencing Levels: "
-    print ""
+    print("Fencing Levels:")
+    print()
     stonith.stonith_level_show()
     constraint.location_show([])
     constraint.order_show([])
     constraint.colocation_show([])
-    print ""
+    print()
     del utils.pcs_options["--all"]
-    print "Resources Defaults:"
+    print("Resources Defaults:")
     resource.show_defaults("rsc_defaults", indent=" ")
-    print "Operations Defaults:"
+    print("Operations Defaults:")
     resource.show_defaults("op_defaults", indent=" ")
-    print
+    print()
     prop.list_property([])
 
 def config_backup(argv):
@@ -104,15 +121,21 @@ def config_backup(argv):
 
     tar_data = config_backup_local()
     if outfile_name:
-        ok, message = utils.write_file(outfile_name, tar_data)
+        ok, message = utils.write_file(
+            outfile_name, tar_data, permissions=0o600, binary=True
+        )
         if not ok:
             utils.err(message)
     else:
-        sys.stdout.write(tar_data)
+        # in python3 sys.stdout only accepts str, so write bytes via its buffer
+        if hasattr(sys.stdout, "buffer"):
+            sys.stdout.buffer.write(tar_data)
+        else:
+            sys.stdout.write(tar_data)
 
 def config_backup_local():
     file_list = config_backup_path_list()
-    tar_data = cStringIO.StringIO()
+    tar_data = BytesIO()
 
     try:
         tarball = tarfile.open(fileobj=tar_data, mode="w|bz2")
@@ -142,12 +165,36 @@ def config_restore(argv):
     if argv:
         infile_name = argv[0]
     if not infile_name:
-        infile_obj = cStringIO.StringIO(sys.stdin.read())
+        # in python3 sys.stdin.read() returns str, so read bytes via its buffer
+        if hasattr(sys.stdin, "buffer"):
+            infile_obj = BytesIO(sys.stdin.buffer.read())
+        else:
+            infile_obj = BytesIO(sys.stdin.read())
 
-    if "--local" in utils.pcs_options:
-        config_restore_local(infile_name, infile_obj)
+    if os.getuid() == 0:
+        if "--local" in utils.pcs_options:
+            config_restore_local(infile_name, infile_obj)
+        else:
+            config_restore_remote(infile_name, infile_obj)
     else:
-        config_restore_remote(infile_name, infile_obj)
+        new_argv = ['config', 'restore']
+        new_stdin = None
+        if '--local' in utils.pcs_options:
+            new_argv.append('--local')
+        if infile_name:
+            new_argv.append(os.path.abspath(infile_name))
+        else:
+            new_stdin = infile_obj.read()
+        err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd(
+            new_argv, True, new_stdin
+        )
+        if err_msgs:
+            for msg in err_msgs:
+                utils.err(msg, False)
+            sys.exit(1)
+        print(std_out)
+        sys.stderr.write(std_err)
+        sys.exit(exitcode)
 
 def config_restore_remote(infile_name, infile_obj):
     extracted = {
@@ -158,6 +205,7 @@ def config_restore_remote(infile_name, infile_obj):
     try:
         tarball = tarfile.open(infile_name, "r|*", infile_obj)
         while True:
+            # next(tarball) does not work in python2.6
             tar_member_info = tarball.next()
             if tar_member_info is None:
                 break
@@ -172,31 +220,46 @@ def config_restore_remote(infile_name, infile_obj):
     config_backup_check_version(extracted["version.txt"])
 
     node_list = utils.getNodesFromCorosyncConf(
-        extracted["cluster.conf" if utils.is_rhel6() else "corosync.conf"]
+        extracted["cluster.conf" if utils.is_rhel6() else "corosync.conf"].decode("utf-8")
     )
     if not node_list:
         utils.err("no nodes found in the tarball")
 
+    err_msgs = []
     for node in node_list:
         try:
             retval, output = utils.checkStatus(node)
             if retval != 0:
-                utils.err("unable to determine status of the node %s" % node)
+                err_msgs.append(output)
+                continue
             status = json.loads(output)
             if status["corosync"] or status["pacemaker"] or status["cman"]:
-                utils.err(
+                err_msgs.append(
                     "Cluster is currently running on node %s. You need to stop "
                         "the cluster in order to restore the configuration."
                     % node
                 )
+                continue
         except (ValueError, NameError):
-            utils.err("unable to determine status of the node %s" % node)
+            err_msgs.append("unable to determine status of the node %s" % node)
+    if err_msgs:
+        for msg in err_msgs:
+            utils.err(msg, False)
+        sys.exit(1)
+
+    # Temporarily disable the config file syncing thread in pcsd so it will
+    # not rewrite the restored files. 10 minutes should be enough time to
+    # restore. If a node returns HTTP 404, it does not support config
+    # syncing at all.
+    for node in node_list:
+        retval, output = utils.pauseConfigSyncing(node, 10 * 60)
+        if not (retval == 0 or output.endswith("(HTTP error: 404)")):
+            utils.err(output)
 
     if infile_obj:
         infile_obj.seek(0)
         tarball_data = infile_obj.read()
     else:
-        with open(infile_name, "r") as tarball:
+        with open(infile_name, "rb") as tarball:
             tarball_data = tarball.read()
 
     error_list = []
@@ -220,12 +283,13 @@ def config_restore_local(infile_name, infile_obj):
                 "the cluster in order to restore the configuration."
         )
 
-    file_list = config_backup_path_list()
+    file_list = config_backup_path_list(with_uid_gid=True)
     tarball_file_list = []
     version = None
     try:
         tarball = tarfile.open(infile_name, "r|*", infile_obj)
         while True:
+            # next(tarball) does not work in python2.6
             tar_member_info = tarball.next()
             if tar_member_info is None:
                 break
@@ -255,6 +319,7 @@ def config_restore_local(infile_name, infile_obj):
             infile_obj.seek(0)
         tarball = tarfile.open(infile_name, "r|*", infile_obj)
         while True:
+            # next(tarball) does not work in python2.6
             tar_member_info = tarball.next()
             if tar_member_info is None:
                 break
@@ -267,10 +332,12 @@ def config_restore_local(infile_name, infile_obj):
                 path = os.path.dirname(path)
             if not extract_info:
                 continue
-            tarball.extractall(
-                os.path.dirname(extract_info["path"]),
-                [tar_member_info]
-            )
+            path_extract = os.path.dirname(extract_info["path"])
+            tarball.extractall(path_extract, [tar_member_info])
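+            # restore ownership and permissions as recorded in
+            # config_backup_path_list()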
+            path_full = os.path.join(path_extract, tar_member_info.name)
+            file_attrs = extract_info["attrs"]
+            os.chmod(path_full, file_attrs["mode"])
+            os.chown(path_full, file_attrs["uid"], file_attrs["gid"])
         tarball.close()
     except (tarfile.TarError, EnvironmentError) as e:
         utils.err("unable to restore the cluster: %s" % e)
@@ -284,9 +351,9 @@ def config_restore_local(infile_name, infile_obj):
 
 def config_backup_path_list(with_uid_gid=False, force_rhel6=None):
     rhel6 = utils.is_rhel6() if force_rhel6 is None else force_rhel6
-    root_attrs = {
+    corosync_attrs = {
         "mtime": int(time.time()),
-        "mode": 0644,
+        "mode": 0o644,
         "uname": "root",
         "gname": "root",
         "uid": 0,
@@ -294,7 +361,7 @@ def config_backup_path_list(with_uid_gid=False, force_rhel6=None):
     }
     cib_attrs = {
         "mtime": int(time.time()),
-        "mode": 0600,
+        "mode": 0o600,
         "uname": settings.pacemaker_uname,
         "gname": settings.pacemaker_gname,
     }
@@ -316,25 +383,37 @@ def config_backup_path_list(with_uid_gid=False, force_rhel6=None):
         "cib.xml": {
             "path": os.path.join(settings.cib_dir, "cib.xml"),
             "required": True,
-            "attrs": cib_attrs,
+            "attrs": dict(cib_attrs),
         },
     }
     if rhel6:
         file_list["cluster.conf"] = {
             "path": settings.cluster_conf_file,
             "required": True,
-            "attrs": root_attrs,
+            "attrs": dict(corosync_attrs),
         }
     else:
         file_list["corosync.conf"] = {
             "path": settings.corosync_conf_file,
             "required": True,
-            "attrs": root_attrs,
+            "attrs": dict(corosync_attrs),
         }
         file_list["uidgid.d"] = {
             "path": settings.corosync_uidgid_dir.rstrip("/"),
             "required": False,
-            "attrs": root_attrs,
+            "attrs": dict(corosync_attrs),
+        }
+        file_list["pcs_settings.conf"] = {
+            "path": settings.pcsd_settings_conf_location,
+            "required": False,
+            "attrs": {
+                "mtime": int(time.time()),
+                "mode": 0o644,
+                "uname": "root",
+                "gname": "root",
+                "uid": 0,
+                "gid": 0,
+            },
         }
     return file_list
 
@@ -358,11 +437,8 @@ def config_backup_check_version(version):
         utils.err("Cannot determine version of the backup")
 
 def config_backup_add_version_to_tarball(tarball, version=None):
-    return utils.tar_add_file_data(
-        tarball,
-        version if version is not None else str(config_backup_version()),
-        "version.txt"
-    )
+    ver = version if version is not None else str(config_backup_version())
+    return utils.tar_add_file_data(tarball, ver.encode("utf-8"), "version.txt")
 
 def config_backup_version():
     return 1
@@ -388,7 +464,7 @@ def config_checkpoint_list():
             pass
     cib_list.sort()
     if not cib_list:
-        print "No checkpoints available"
+        print("No checkpoints available")
         return
     for cib_info in cib_list:
         print(
@@ -425,7 +501,7 @@ def config_import_cman(argv):
     # prepare convertor options
     cluster_conf = settings.cluster_conf_file
     dry_run_output = None
-    rhel6 = utils.is_rhel6()
+    output_format = "cluster.conf" if utils.is_rhel6() else "corosync.conf"
     invalid_args = False
     for arg in argv:
         if "=" in arg:
@@ -434,19 +510,24 @@ def config_import_cman(argv):
                 cluster_conf = value
             elif name == "output":
                 dry_run_output = value
-                if not dry_run_output.endswith(".tar.bz2"):
-                    dry_run_output += ".tar.bz2"
             elif name == "output-format":
-                if value == "corosync.conf":
-                    rhel6 = False
-                elif value == "cluster.conf":
-                    rhel6 = True
+                if value in (
+                    "cluster.conf", "corosync.conf",
+                    "pcs-commands", "pcs-commands-verbose",
+                ):
+                    output_format = value
                 else:
                     invalid_args = True
             else:
                 invalid_args = True
         else:
             invalid_args = True
+    if (
+        output_format not in ("pcs-commands", "pcs-commands-verbose")
+        and
+        (dry_run_output and not dry_run_output.endswith(".tar.bz2"))
+    ):
+        dry_run_output += ".tar.bz2"
     if invalid_args or not dry_run_output:
         usage.config(["import-cman"])
         sys.exit(1)
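
Taken together with the .tar.bz2 handling above, the new command-output formats are requested on the command line roughly like this (an illustrative invocation; the file names are made up):

    pcs config import-cman input=/etc/cluster/cluster.conf \
        output=cluster-commands.txt output-format=pcs-commands
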
@@ -455,10 +536,13 @@ def config_import_cman(argv):
     interactive = "--interactive" in utils.pcs_options
 
     clufter_args = {
-        "input": cluster_conf,
+        "input": str(cluster_conf),
         "cib": {"passin": "bytestring"},
         "nocheck": force,
         "batch": True,
+        "sys": "linux",
+        # Make it work on RHEL6 as well for sure
+        "color": "always" if sys.stdout.isatty() else "never"
     }
     if interactive:
         if "EDITOR" not in os.environ:
@@ -467,52 +551,70 @@ def config_import_cman(argv):
         clufter_args["editor"] = os.environ["EDITOR"]
     if debug:
         logging.getLogger("clufter").setLevel(logging.DEBUG)
-    if rhel6:
+    if output_format == "cluster.conf":
         clufter_args["ccs_pcmk"] = {"passin": "bytestring"}
-    else:
+        clufter_args["dist"] = "redhat,6.7,Santiago"
+        cmd_name = "ccs2pcs-flatiron"
+    elif output_format == "corosync.conf":
         clufter_args["coro"] = {"passin": "struct"}
-    clufter_args_obj = type('ClufterOptions', (object, ), clufter_args)
+        clufter_args["dist"] = "redhat,7.1,Maipo"
+        cmd_name = "ccs2pcs-needle"
+    elif output_format in ("pcs-commands", "pcs-commands-verbose"):
+        clufter_args["output"] = {"passin": "bytestring"}
+        clufter_args["start_wait"] = "60"
+        clufter_args["tmp_cib"] = "tmp-cib.xml"
+        clufter_args["force"] = force
+        clufter_args["text_width"] = "80"
+        clufter_args["silent"] = True
+        clufter_args["noguidance"] = True
+        if output_format == "pcs-commands-verbose":
+            clufter_args["text_width"] = "-1"
+            clufter_args["silent"] = False
+            clufter_args["noguidance"] = False
+        cmd_name = "ccs2pcscmd-flatiron"
+    clufter_args_obj = type(str("ClufterOptions"), (object, ), clufter_args)
 
     # run convertor
-    try:
-        cmd_name = "ccs2pcs-flatiron" if rhel6 else "ccs2pcs-needle"
-        result = None
-        cmd_manager = clufter.command_manager.CommandManager.init_lookup(
-            cmd_name
-        )
-        result = cmd_manager.commands[cmd_name](clufter_args_obj)
-        error_message = ""
-    except Exception as e:
-        error_message = str(e)
-    if error_message or result != 0:
-        hints = []
-        hints.append("--interactive to solve the issues manually")
-        if not debug:
-            hints.append("--debug to get more information")
-        if not force:
-            hints.append("--force to override")
-        hints_string = "\nTry using %s." % ", ".join(hints) if hints else ""
-        sys.stderr.write(
+    run_clufter(
+        cmd_name, clufter_args_obj, debug, force,
             "Error: unable to import cluster configuration"
-            + (": %s" % error_message if error_message else "")
-            + hints_string
-            + "\n"
+    )
+
+    # save commands
+    if output_format in ("pcs-commands", "pcs-commands-verbose"):
+        ok, message = utils.write_file(
+            dry_run_output,
+            clufter_args_obj.output["passout"]
         )
-        sys.exit(1 if result is None else result)
+        if not ok:
+            utils.err(message)
+        return
 
     # put new config files into tarball
-    file_list = config_backup_path_list(with_uid_gid=True, force_rhel6=rhel6)
-    tar_data = cStringIO.StringIO()
+    file_list = config_backup_path_list(
+        force_rhel6=(output_format == "cluster.conf")
+    )
+    for file_item in file_list.values():
+        file_item["attrs"]["uname"] = "root"
+        file_item["attrs"]["gname"] = "root"
+        file_item["attrs"]["uid"] = 0
+        file_item["attrs"]["gid"] = 0
+        file_item["attrs"]["mode"] = 0o600
+    tar_data = BytesIO()
     try:
         tarball = tarfile.open(fileobj=tar_data, mode="w|bz2")
         config_backup_add_version_to_tarball(tarball)
         utils.tar_add_file_data(
-            tarball, clufter_args_obj.cib["passout"], "cib.xml",
+            tarball,
+            clufter_args_obj.cib["passout"].encode("utf-8"),
+            "cib.xml",
             **file_list["cib.xml"]["attrs"]
         )
-        if rhel6:
+        if output_format == "cluster.conf":
             utils.tar_add_file_data(
-                tarball, clufter_args_obj.ccs_pcmk["passout"], "cluster.conf",
+                tarball,
+                clufter_args_obj.ccs_pcmk["passout"].encode("utf-8"),
+                "cluster.conf",
                 **file_list["cluster.conf"]["attrs"]
             )
         else:
@@ -531,7 +633,9 @@ def config_import_cman(argv):
                 "struct", ("corosync", (), corosync_struct)
             )("bytestring")
             utils.tar_add_file_data(
-                tarball, corosync_conf_data, "corosync.conf",
+                tarball,
+                corosync_conf_data.encode("utf-8"),
+                "corosync.conf",
                 **file_list["corosync.conf"]["attrs"]
             )
             for uidgid in uidgid_list:
@@ -547,7 +651,9 @@ def config_import_cman(argv):
                     "struct", ("corosync", (), [("uidgid", uidgid, None)])
                 )("bytestring")
                 utils.tar_add_file_data(
-                    tarball, uidgid_data, "uidgid.d/" + filename,
+                    tarball,
+                    uidgid_data.encode("utf-8"),
+                    "uidgid.d/" + filename,
                     **file_list["uidgid.d"]["attrs"]
                 )
         tarball.close()
@@ -557,10 +663,112 @@ def config_import_cman(argv):
 
     #save tarball / remote restore
     if dry_run_output:
-        ok, message = utils.write_file(dry_run_output, tar_data.read())
+        ok, message = utils.write_file(
+            dry_run_output, tar_data.read(), permissions=0o600, binary=True
+        )
         if not ok:
             utils.err(message)
     else:
         config_restore_remote(None, tar_data)
     tar_data.close()
 
+def config_export_pcs_commands(argv, verbose=False):
+    if no_clufter:
+        utils.err(
+            "Unable to perform export due to missing python-clufter package"
+        )
+
+    # parse options
+    debug = "--debug" in utils.pcs_options
+    force = "--force" in utils.pcs_options
+    interactive = "--interactive" in utils.pcs_options
+    invalid_args = False
+    output_file = None
+    for arg in argv:
+        if "=" in arg:
+            name, value = arg.split("=", 1)
+            if name == "output":
+                output_file = value
+            else:
+                invalid_args = True
+        else:
+            invalid_args = True
+    if invalid_args or not output_file:
+        usage.config(["export", "pcs-commands"])
+        sys.exit(1)
+
+    # prepare convertor options
+    clufter_args = {
+        "nocheck": force,
+        "batch": True,
+        "sys": "linux",
+        # Make it work on RHEL6 as well for sure
+        "color": "always" if sys.stdout.isatty() else "never",
+        "coro": settings.corosync_conf_file,
+        "ccs": settings.cluster_conf_file,
+        "output": {"passin": "bytestring"},
+        "start_wait": "60",
+        "tmp_cib": "tmp-cib.xml",
+        "force": force,
+        "text_width": "80",
+        "silent": True,
+        "noguidance": True,
+    }
+    if interactive:
+        if "EDITOR" not in os.environ:
+            utils.err("$EDITOR environment variable is not set")
+        clufter_args["batch"] = False
+        clufter_args["editor"] = os.environ["EDITOR"]
+    if debug:
+        logging.getLogger("clufter").setLevel(logging.DEBUG)
+    if utils.usefile:
+        clufter_args["cib"] = os.path.abspath(utils.filename)
+    else:
+        clufter_args["cib"] = ("bytestring", utils.get_cib())
+    if verbose:
+        clufter_args["text_width"] = "-1"
+        clufter_args["silent"] = False
+        clufter_args["noguidance"] = False
+    clufter_args_obj = type(str("ClufterOptions"), (object, ), clufter_args)
+    cmd_name = "pcs2pcscmd-flatiron" if utils.is_rhel6() else "pcs2pcscmd-needle"
+
+    # run convertor
+    run_clufter(
+        cmd_name, clufter_args_obj, debug, force,
+        "Error: unable to export cluster configuration"
+    )
+
+    # save commands
+    ok, message = utils.write_file(
+        output_file,
+        clufter_args_obj.output["passout"]
+    )
+    if not ok:
+        utils.err(message)
+
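
Judging from the usage.config(["export", "pcs-commands"]) call above, this code path is reached by something along the lines of (illustrative):

    pcs config export pcs-commands output=cluster-commands.txt

with verbose=True presumably supplied by a pcs-commands-verbose variant of the subcommand.
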
+def run_clufter(cmd_name, cmd_args, debug, force, err_prefix):
+    try:
+        result = None
+        cmd_manager = clufter.command_manager.CommandManager.init_lookup(
+            cmd_name
+        )
+        result = cmd_manager.commands[cmd_name](cmd_args)
+        error_message = ""
+    except Exception as e:
+        error_message = str(e)
+    if error_message or result != 0:
+        hints = []
+        hints.append("--interactive to solve the issues manually")
+        if not debug:
+            hints.append("--debug to get more information")
+        if not force:
+            hints.append("--force to override")
+        hints_string = "\nTry using %s." % ", ".join(hints) if hints else ""
+        sys.stderr.write(
+            err_prefix
+            + (": %s" % error_message if error_message else "")
+            + hints_string
+            + "\n"
+        )
+        sys.exit(1 if result is None else result)
+
diff --git a/pcs/constraint.py b/pcs/constraint.py
index c7950d8..8c027bc 100644
--- a/pcs/constraint.py
+++ b/pcs/constraint.py
@@ -1,13 +1,18 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import sys
+import xml.dom.minidom
+from xml.dom.minidom import parseString
+from collections import defaultdict
+
 import usage
 import utils
 import resource
 import rule as rule_utils
-import xml.dom.minidom
-import xml.etree.ElementTree as ET
-from xml.dom.minidom import getDOMImplementation
-from xml.dom.minidom import parseString
-from collections import defaultdict
+
 
 OPTIONS_ACTION = ("start", "promote", "demote", "stop")
 DEFAULT_ACTION = "start"
@@ -97,12 +102,12 @@ def colocation_show(argv):
     (dom,constraintsElement) = getCurrentConstraints()
 
     resource_colocation_sets = []
-    print "Colocation Constraints:"
+    print("Colocation Constraints:")
     for co_loc in constraintsElement.getElementsByTagName('rsc_colocation'):
         if not co_loc.getAttribute("rsc"):
             resource_colocation_sets.append(co_loc)
         else:
-            print "  " + colocation_el_to_string(co_loc, showDetail)
+            print("  " + colocation_el_to_string(co_loc, showDetail))
     print_sets(resource_colocation_sets, showDetail)
 
 def colocation_el_to_string(co_loc, showDetail=False):
@@ -144,7 +149,7 @@ def colocation_rm(argv):
     if elementFound == True:
         utils.replace_cib_configuration(dom)
     else:
-        print "No matching resources found in ordering list"
+        print("No matching resources found in ordering list")
 
 
 # When passed an array of arguments if the first argument doesn't have an '='
@@ -296,7 +301,11 @@ def colocation_set(argv):
             argv[i:] = []
             break
 
-    current_set = set_args_into_array(argv)
+    argv.insert(0, "set")
+    resource_sets = set_args_into_array(argv)
+    if not check_empty_resource_sets(resource_sets):
+        usage.constraint(["colocation set"])
+        sys.exit(1)
     cib, constraints = getCurrentConstraints(utils.get_cib_dom())
 
     attributes = []
@@ -346,7 +355,7 @@ def colocation_set(argv):
     rsc_colocation = cib.createElement("rsc_colocation")
     for name, value in attributes:
         rsc_colocation.setAttribute(name, value)
-    set_add_resource_sets(rsc_colocation, current_set, cib)
+    set_add_resource_sets(rsc_colocation, resource_sets, cib)
     constraints.appendChild(rsc_colocation)
     utils.replace_cib_configuration(cib)
 
@@ -359,12 +368,12 @@ def order_show(argv):
     (dom,constraintsElement) = getCurrentConstraints()
 
     resource_order_sets = []
-    print "Ordering Constraints:"
+    print("Ordering Constraints:")
     for ord_loc in constraintsElement.getElementsByTagName('rsc_order'):
         if not ord_loc.getAttribute("first"):
             resource_order_sets.append(ord_loc)
         else:
-            print "  " + order_el_to_string(ord_loc, showDetail)
+            print("  " + order_el_to_string(ord_loc, showDetail))
     print_sets(resource_order_sets,showDetail)
 
 def order_el_to_string(ord_loc, showDetail=False):
@@ -377,7 +386,12 @@ def order_el_to_string(ord_loc, showDetail=False):
     oc_kind = ord_loc.getAttribute("kind")
     oc_sym = ""
     oc_id_out = ""
-    if ord_loc.getAttribute("symmetrical") == "false":
+    oc_options = ""
+    if (
+        ord_loc.getAttribute("symmetrical")
+        and
+        not utils.is_cib_true(ord_loc.getAttribute("symmetrical"))
+    ):
         oc_sym = "(non-symmetrical)"
     if oc_kind != "":
         score_text = "(kind:" + oc_kind + ")"
@@ -387,16 +401,27 @@ def order_el_to_string(ord_loc, showDetail=False):
         score_text = "(score:" + oc_score + ")"
     if showDetail:
         oc_id_out = "(id:"+oc_id+")"
-    return " ".join(filter(None, [
+    already_processed_attrs = (
+        "first", "then", "first-action", "then-action", "id", "score", "kind",
+        "symmetrical"
+    )
+    oc_options = " ".join([
+        "{0}={1}".format(name, value)
+        for name, value in ord_loc.attributes.items()
+        if name not in already_processed_attrs
+    ])
+    if oc_options:
+        oc_options = "(Options: " + oc_options + ")"
+    return " ".join([arg for arg in [
         first_action, oc_resource1, "then", then_action, oc_resource2,
-        score_text, oc_sym, oc_id_out
-    ]))
+        score_text, oc_sym, oc_options, oc_id_out
+    ] if arg])
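
With the oc_options handling added here, attributes beyond the well-known ones are now printed as well. For an ordering constraint carrying, say, first-action="start", then-action="start", kind="Mandatory" and require-all="false", the detailed output would read roughly:

    start A then start B (kind:Mandatory) (Options: require-all=false) (id:order-A-B)
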
 
 def print_sets(sets,showDetail):
     if len(sets) != 0:
-        print "  Resource Sets:"
+        print("  Resource Sets:")
         for ro in sets:
-            print "    " + set_constraint_el_to_string(ro, showDetail)
+            print("    " + set_constraint_el_to_string(ro, showDetail))
 
 def set_constraint_el_to_string(constraint_el, showDetail=False):
     set_list = []
@@ -417,16 +442,23 @@ def set_constraint_el_to_string(constraint_el, showDetail=False):
     return " ".join(set_list + constraint_opts)
 
 def set_args_into_array(argv):
-    current_set = []
-    current_nodes = []
-    for i in range(len(argv)):
-        if argv[i] == "set" and len(argv) >= i:
-            current_set = current_set + set_args_into_array(argv[i+1:])
-            break
-        current_nodes.append(argv[i])
-    current_set = [current_nodes] + current_set
+    all_sets = []
+    current_set = None
+    for elem in argv:
+        if "set" == elem:
+            if current_set is not None:
+                all_sets.append(current_set)
+            current_set = []
+        else:
+            current_set.append(elem)
+    if current_set is not None:
+        all_sets.append(current_set)
+    return all_sets
 
-    return current_set
+def check_empty_resource_sets(sets):
+    if not sets:
+        return False
+    return all(sets)
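
The rewritten set_args_into_array expects the leading "set" keyword (which the callers now insert) and splits the arguments into one list per "set" token; check_empty_resource_sets then rejects input where any of those lists came out empty. A quick worked example of the behaviour implied by the code:

    set_args_into_array(["set", "A", "B", "set", "C", "D"])
    # -> [["A", "B"], ["C", "D"]]

    check_empty_resource_sets([["A", "B"], ["C", "D"]])   # True
    check_empty_resource_sets([["A", "B"], []])           # False - an empty set
    check_empty_resource_sets([])                         # False - nothing given
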
 
 def set_add_resource_sets(elem, sets, cib):
     allowed_options = {
@@ -504,7 +536,11 @@ def order_set(argv):
             argv[i:] = []
             break
 
-    current_set = set_args_into_array(argv)
+    argv.insert(0, "set")
+    resource_sets = set_args_into_array(argv)
+    if not check_empty_resource_sets(resource_sets):
+        usage.constraint(["order set"])
+        sys.exit(1)
     cib, constraints = getCurrentConstraints(utils.get_cib_dom())
 
     attributes = []
@@ -555,7 +591,7 @@ def order_set(argv):
     rsc_order = cib.createElement("rsc_order")
     for name, value in attributes:
         rsc_order.setAttribute(name, value)
-    set_add_resource_sets(rsc_order, current_set, cib)
+    set_add_resource_sets(rsc_order, resource_sets, cib)
     constraints.appendChild(rsc_order)
     utils.replace_cib_configuration(cib)
 
@@ -730,7 +766,9 @@ def order_add(argv,returnElementOnly=False):
                     "  " + order_el_to_string(dup, True) for dup in duplicates
                 ])
             )
-    print "Adding " + resource1 + " " + resource2 + " ("+scorekind+")" + options
+    print(
+        "Adding " + resource1 + " " + resource2 + " ("+scorekind+")" + options
+    )
 
     if returnElementOnly == False:
         utils.replace_cib_configuration(dom)
@@ -780,7 +818,7 @@ def location_show(argv):
     ruleshash = defaultdict(list)
     all_loc_constraints = constraintsElement.getElementsByTagName('rsc_location')
 
-    print "Location Constraints:"
+    print("Location Constraints:")
     for rsc_loc in all_loc_constraints:
         lc_node = rsc_loc.getAttribute("node")
         lc_rsc = rsc_loc.getAttribute("rsc")
@@ -825,35 +863,35 @@ def location_show(argv):
         else:
             rschash[lc_rsc] = [(lc_id,lc_node,lc_score, lc_role, lc_resource_discovery)]
 
-    nodelist = list(set(nodehashon.keys() + nodehashoff.keys()))
-    rsclist = list(set(rschashon.keys() + rschashoff.keys()))
+    nodelist = list(set(list(nodehashon.keys()) + list(nodehashoff.keys())))
+    rsclist = list(set(list(rschashon.keys()) + list(rschashoff.keys())))
 
     if byNode == True:
         for node in nodelist:
             if len(valid_noderes) != 0:
                 if node not in valid_noderes:
                     continue
-            print "  Node: " + node
-
-            if (node in nodehashon):
-                print "    Allowed to run:"
-                for options in nodehashon[node]:
-                    print "      " + options[1] +  " (" + options[0] + ")",
-                    if (options[3] != ""):
-                        print "(role: "+options[3]+")",
-                    if (options[4] != ""):
-                        print "(resource-discovery="+options[4]+")",
-                    print "Score: "+ options[2]
-
-            if (node in nodehashoff):
-                print "    Not allowed to run:"
-                for options in nodehashoff[node]:
-                    print "      " + options[1] +  " (" + options[0] + ")",
-                    if (options[3] != ""):
-                        print "(role: "+options[3]+")",
-                    if (options[4] != ""):
-                        print "(resource-discovery="+options[4]+")",
-                    print "Score: "+ options[2]
+            print("  Node: " + node)
+
+            nodehash_label = (
+                (nodehashon, "    Allowed to run:"),
+                (nodehashoff, "    Not allowed to run:")
+            )
+            for nodehash, label in nodehash_label:
+                if node in nodehash:
+                    print(label)
+                    for options in nodehash[node]:
+                        line_parts = [
+                            "      " + options[1] + " (" + options[0] + ")",
+                        ]
+                        if options[3]:
+                            line_parts.append("(role: {0})".format(options[3]))
+                        if options[4]:
+                            line_parts.append(
+                                "(resource-discovery={0})".format(options[4])
+                            )
+                        line_parts.append("Score: " + options[2])
+                        print(" ".join(line_parts))
         show_location_rules(ruleshash,showDetail)
     else:
         rsclist.sort()
@@ -861,33 +899,30 @@ def location_show(argv):
             if len(valid_noderes) != 0:
                 if rsc not in valid_noderes:
                     continue
-            print "  Resource: " + rsc
-            if (rsc in rschashon):
-                for options in rschashon[rsc]:
-                    if options[1] == "":
-                        continue
-                    print "    Enabled on:",
-                    print options[1],
-                    print "(score:"+options[2]+")",
-                    if (options[3] != ""):
-                        print "(role: "+options[3]+")",
-                    if (options[4] != ""):
-                        print "(resource-discovery="+options[4]+")",
-                    if showDetail:
-                        print "(id:"+options[0]+")",
-                    print
-            if (rsc in rschashoff):
-                for options in rschashoff[rsc]:
-                    print "    Disabled on:",
-                    print options[1],
-                    print "(score:"+options[2]+")",
-                    if (options[3] != ""):
-                        print "(role: "+options[3]+")",
-                    if (options[4] != ""):
-                        print "(resource-discovery="+options[4]+")",
-                    if showDetail:
-                        print "(id:"+options[0]+")",
-                    print 
+            print("  Resource: " + rsc)
+            rschash_label = (
+                (rschashon, "    Enabled on:"),
+                (rschashoff, "    Disabled on:"),
+            )
+            for rschash, label in rschash_label:
+                if rsc in rschash:
+                    for options in rschash[rsc]:
+                        if not options[1]:
+                            continue
+                        line_parts = [
+                            label,
+                            options[1],
+                            "(score:{0})".format(options[2]),
+                        ]
+                        if options[3]:
+                            line_parts.append("(role: {0})".format(options[3]))
+                        if options[4]:
+                            line_parts.append(
+                                "(resource-discovery={0})".format(options[4])
+                            )
+                        if showDetail:
+                            line_parts.append("(id:{0})".format(options[0]))
+                        print(" ".join(line_parts))
             miniruleshash={}
             miniruleshash["Resource: " + rsc] = ruleshash["Resource: " + rsc]
             show_location_rules(miniruleshash,showDetail, True)
@@ -897,7 +932,7 @@ def show_location_rules(ruleshash,showDetail,noheader=False):
     for rsc in ruleshash:
         constrainthash= defaultdict(list)
         if not noheader:
-            print "  " + rsc
+            print("  " + rsc)
         for rule in ruleshash[rsc]:
             constraint_id = rule.parentNode.getAttribute("id")
             constrainthash[constraint_id].append(rule)
@@ -911,11 +946,11 @@ def show_location_rules(ruleshash,showDetail,noheader=False):
             else:
                 constraint_option_info = ""
 
-            print "    Constraint: " + constraint_id + constraint_option_info
+            print("    Constraint: " + constraint_id + constraint_option_info)
             for rule in constrainthash[constraint_id]:
-                print rule_utils.ExportDetailed().get_string(
+                print(rule_utils.ExportDetailed().get_string(
                     rule, showDetail, "      "
-                )
+                ))
 
 def location_prefer(argv):
     rsc = argv.pop(0)
@@ -977,7 +1012,7 @@ def location_add(argv,rm=False):
                 if '=' in arg:
                     options.append(arg.split('=',1))
                 else:
-                    print "Error: bad option '%s'" % arg
+                    print("Error: bad option '%s'" % arg)
                     usage.constraint(["location add"])
                     sys.exit(1)
                 if options[-1][0] != "resource-discovery" and "--force" not in utils.pcs_options:
@@ -1176,7 +1211,7 @@ def constraint_rm(argv,returnStatus=False, constraintsElement=None, passed_dom=N
         if returnStatus:
             return True
     else:
-        print >> sys.stderr, "Error: Unable to find constraint - '%s'" % c_id
+        utils.err("Unable to find constraint - '%s'" % c_id, False)
         if returnStatus:
             return False
         sys.exit(1)
@@ -1188,21 +1223,21 @@ def constraint_ref(argv):
         sys.exit(1)
 
     for arg in argv:
-        print "Resource: %s" % arg
+        print("Resource: %s" % arg)
         constraints,set_constraints = find_constraints_containing(arg)
         if len(constraints) == 0 and len(set_constraints) == 0:
-            print "  No Matches."
+            print("  No Matches.")
         else:
             for constraint in constraints:
-                print "  " + constraint
+                print("  " + constraint)
             for constraint in set_constraints:
-                print "  " + constraint
+                print("  " + constraint)
 
 def remove_constraints_containing(resource_id,output=False,constraints_element = None, passed_dom=None):
     constraints,set_constraints = find_constraints_containing(resource_id, passed_dom)
     for c in constraints:
         if output == True:
-            print "Removing Constraint - " + c
+            print("Removing Constraint - " + c)
         if constraints_element != None:
             constraint_rm([c], True, constraints_element, passed_dom=passed_dom)
         else:
@@ -1218,14 +1253,14 @@ def remove_constraints_containing(resource_id,output=False,constraints_element =
                 pn = c.parentNode
                 pn.removeChild(c)
                 if output == True:
-                    print "Removing %s from set %s" % (resource_id,pn.getAttribute("id"))
+                    print("Removing %s from set %s" % (resource_id,pn.getAttribute("id")))
                 if pn.getElementsByTagName("resource_ref").length == 0:
-                    print "Removing set %s" % pn.getAttribute("id")
+                    print("Removing set %s" % pn.getAttribute("id"))
                     pn2 = pn.parentNode
                     pn2.removeChild(pn)
                     if pn2.getElementsByTagName("resource_set").length == 0:
                         pn2.parentNode.removeChild(pn2)
-                        print "Removing constraint %s" % pn2.getAttribute("id")
+                        print("Removing constraint %s" % pn2.getAttribute("id"))
         if passed_dom:
             return dom
         utils.replace_cib_configuration(dom)
@@ -1277,7 +1312,7 @@ def find_constraints_containing(resource_id, passed_dom=None):
 def remove_constraints_containing_node(dom, node, output=False):
     for constraint in find_constraints_containing_node(dom, node):
         if output:
-            print "Removing Constraint - %s" % constraint.getAttribute("id")
+            print("Removing Constraint - %s" % constraint.getAttribute("id"))
         constraint.parentNode.removeChild(constraint)
     return dom
 
@@ -1345,19 +1380,21 @@ def constraint_rule(argv):
         cib = utils.get_cib_etree()
         temp_id = argv.pop(0)
         constraints = cib.find('.//constraints')
-        loc_cons = cib.findall('.//rsc_location')
+        loc_cons = cib.findall(str('.//rsc_location'))
 
-        rules = cib.findall('.//rule')
+        rules = cib.findall(str('.//rule'))
         for loc_con in loc_cons:
             for rule in loc_con:
                 if rule.get("id") == temp_id:
                     if len(loc_con) > 1:
-                        print "Removing Rule:",rule.get("id")
+                        print("Removing Rule: {0}".format(rule.get("id")))
                         loc_con.remove(rule)
                         found = True
                         break
                     else:
-                        print "Removing Constraint:",loc_con.get("id") 
+                        print(
+                            "Removing Constraint: {0}".format(loc_con.get("id"))
+                        )
                         constraints.remove(loc_con)
                         found = True
                         break
diff --git a/pcs/corosync.conf.fedora.template b/pcs/corosync.conf.fedora.template
deleted file mode 100644
index 4d1227c..0000000
--- a/pcs/corosync.conf.fedora.template
+++ /dev/null
@@ -1,22 +0,0 @@
-totem {
-version: 2
-secauth: off
-cluster_name: @@cluster_name
-transport: @@transport
-@@totem_options
-@@interfaceandrrpmode
-@@ip_version
-}
-
-nodelist {
-@@nodes}
-
-quorum {
-provider: corosync_votequorum
-@@quorum_options
-@@two_node
-}
-
-logging {
-to_syslog: yes
-}
diff --git a/pcs/corosync.conf.template b/pcs/corosync.conf.template
deleted file mode 100644
index 73a5a1c..0000000
--- a/pcs/corosync.conf.template
+++ /dev/null
@@ -1,39 +0,0 @@
-# Please read the corosync.conf.5 manual page
-compatibility: whitetank
-
-totem {
-	version: 2
-	secauth: off
-	threads: 0
-	interface {
-		ringnumber: 0
-		bindnetaddr: @@bindnetaddr
-		mcastaddr: @@mcastaddr
-		mcastport: @@mcastport
-		ttl: 1
-	}
-}
-
-logging {
-	fileline: off
-	to_stderr: no
-	to_logfile: yes
-	to_syslog: yes
-	logfile: /var/log/cluster/corosync.log
-	debug: off
-	timestamp: on
-	logger_subsys {
-		subsys: AMF
-		debug: off
-	}
-}
-
-amf {
-	mode: disabled
-}
-
-service {
-name:pacemaker
-ver: 1
-}
-
diff --git a/pcs/corosync_conf.py b/pcs/corosync_conf.py
new file mode 100644
index 0000000..1debc39
--- /dev/null
+++ b/pcs/corosync_conf.py
@@ -0,0 +1,153 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+
+class Section(object):
+
+    def __init__(self, name):
+        self._parent = None
+        self._attr_list = []
+        self._section_list = []
+        self._name = name
+
+    @property
+    def parent(self):
+        return self._parent
+
+    @property
+    def name(self):
+        return self._name
+
+    def export(self, indent="    "):
+        lines = []
+        for attr in self._attr_list:
+            lines.append("{0}: {1}".format(*attr))
+        if self._attr_list and self._section_list:
+            lines.append("")
+        section_count = len(self._section_list)
+        for index, section in enumerate(self._section_list, 1):
+            lines.extend(str(section).split("\n"))
+            if not lines[-1].strip():
+                del lines[-1]
+            if index < section_count:
+                lines.append("")
+        if self.parent:
+            lines = [indent + x if x else x for x in lines]
+            lines.insert(0, self.name + " {")
+            lines.append("}")
+        final = "\n".join(lines)
+        if final:
+            final += "\n"
+        return final
+
+    def get_root(self):
+        parent = self
+        while parent.parent:
+            parent = parent.parent
+        return parent
+
+    def get_attributes(self, name=None):
+        return [
+            attr for attr in self._attr_list if name is None or attr[0] == name
+        ]
+
+    def add_attribute(self, name, value):
+        self._attr_list.append([name, value])
+        return self
+
+    def del_attribute(self, attribute):
+        self._attr_list = [
+            attr for attr in self._attr_list if attr != attribute
+        ]
+        return self
+
+    def del_attributes_by_name(self, name, value=None):
+        self._attr_list = [
+            attr for attr in self._attr_list
+                if not(attr[0] == name and (value is None or attr[1] == value))
+        ]
+        return self
+
+    def set_attribute(self, name, value):
+        found = False
+        new_attr_list = []
+        for attr in self._attr_list:
+            if attr[0] != name:
+                new_attr_list.append(attr)
+            elif not found:
+                found = True
+                attr[1] = value
+                new_attr_list.append(attr)
+        self._attr_list = new_attr_list
+        if not found:
+            self.add_attribute(name, value)
+        return self
+
+    def get_sections(self, name=None):
+        return [
+            section for section in self._section_list
+                if name is None or section.name == name
+        ]
+
+    def add_section(self, section):
+        parent = self
+        while parent:
+            if parent == section:
+                raise CircularParentshipException()
+            parent = parent.parent
+        if section.parent:
+            section.parent.del_section(section)
+        section._parent = self
+        self._section_list.append(section)
+        return self
+
+    def del_section(self, section):
+        self._section_list.remove(section)
+        # don't set parent to None if the section was not found in the list
+        # thanks to remove raising a ValueError in that case
+        section._parent = None
+        return self
+
+    def __str__(self):
+        return self.export()
+
+
+def parse_string(conf_text):
+    root = Section("")
+    _parse_section(conf_text.split("\n"), root)
+    return root
+
+def _parse_section(lines, section):
+    # parser is trying to work the same way as the original corosync parser
+    while lines:
+        current_line = lines.pop(0).strip()
+        if not current_line or current_line[0] == "#":
+            continue
+        if "{" in current_line:
+            section_name, junk = current_line.rsplit("{", 1)
+            new_section = Section(section_name.strip())
+            section.add_section(new_section)
+            _parse_section(lines, new_section)
+        elif "}" in current_line:
+            if not section.parent:
+                raise ParseErrorException("Unexpected closing brace")
+            return
+        elif ":" in current_line:
+            section.add_attribute(
+                *[x.strip() for x in current_line.split(":", 1)]
+            )
+    if section.parent:
+        raise ParseErrorException("Missing closing brace")
+
+
+class CorosyncConfException(Exception):
+    pass
+
+class CircularParentshipException(CorosyncConfException):
+    pass
+
+class ParseErrorException(CorosyncConfException):
+    pass
+
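
The new module depends only on the standard library, so its round-trip behaviour can be sketched directly against the API defined above (a small example, assuming the module is importable as corosync_conf like the other in-tree modules; it is not part of the shipped code):

    import corosync_conf

    text = "\n".join([
        "totem {",
        "    version: 2",
        "    cluster_name: demo",
        "}",
    ])
    root = corosync_conf.parse_string(text)
    totem = root.get_sections("totem")[0]
    totem.set_attribute("cluster_name", "prod")
    print(root.export())
    # totem {
    #     version: 2
    #     cluster_name: prod
    # }
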
diff --git a/pcs/node.py b/pcs/node.py
new file mode 100644
index 0000000..06396f6
--- /dev/null
+++ b/pcs/node.py
@@ -0,0 +1,112 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import sys
+
+import usage
+import utils
+
+
+def node_cmd(argv):
+    if len(argv) == 0:
+        usage.node()
+        sys.exit(1)
+
+    sub_cmd = argv.pop(0)
+    if sub_cmd == "help":
+        usage.node(argv)
+    elif sub_cmd == "maintenance":
+        node_maintenance(argv)
+    elif sub_cmd == "unmaintenance":
+        node_maintenance(argv, False)
+    elif sub_cmd == "utilization":
+        if len(argv) == 0:
+            print_nodes_utilization()
+        elif len(argv) == 1:
+            print_node_utilization(argv.pop(0))
+        else:
+            set_node_utilization(argv.pop(0), argv)
+    else:
+        usage.node()
+        sys.exit(1)
+
+
+def node_maintenance(argv, on=True):
+    action = ["-v", "on"] if on else ["-D"]
+
+    cluster_nodes = utils.getNodesFromPacemaker()
+    nodes = []
+    failed_count = 0
+    if "--all" in utils.pcs_options:
+        nodes = cluster_nodes
+    elif argv:
+        for node in argv:
+            if node not in cluster_nodes:
+                utils.err(
+                    "Node '%s' does not appear to exist in configuration" %
+                    node,
+                    False
+                )
+                failed_count += 1
+            else:
+                nodes.append(node)
+    else:
+        nodes.append("")
+
+    for node in nodes:
+        node_attrs = ["-N", node] if node else []
+        output, retval = utils.run(
+            ["crm_attribute", "-t", "nodes", "-n", "maintenance"] + action +
+            node_attrs
+        )
+        if retval != 0:
+            node_name = ("node '%s'" % node) if node else "current node"
+            failed_count += 1
+            if on:
+                utils.err(
+                    "Unable to put %s to maintenance mode.\n%s" %
+                    (node_name, output),
+                    False
+                )
+            else:
+                utils.err(
+                    "Unable to remove %s from maintenance mode.\n%s" %
+                    (node_name, output),
+                    False
+                )
+    if failed_count > 0:
+        sys.exit(1)
+
+def set_node_utilization(node, argv):
+    cib = utils.get_cib_dom()
+    node_el = utils.dom_get_node(cib, node)
+    if node_el is None:
+        utils.err("Unable to find a node: {0}".format(node))
+
+    utils.dom_update_utilization(
+        node_el, utils.convert_args_to_tuples(argv), "nodes-"
+    )
+    utils.replace_cib_configuration(cib)
+
+def print_node_utilization(node):
+    cib = utils.get_cib_dom()
+    node_el = utils.dom_get_node(cib, node)
+    if node_el is None:
+        utils.err("Unable to find a node: {0}".format(node))
+    utilization = utils.get_utilization_str(node_el)
+
+    print("Node Utilization:")
+    print(" {0}: {1}".format(node, utilization))
+
+def print_nodes_utilization():
+    cib = utils.get_cib_dom()
+    utilization = {}
+    for node_el in cib.getElementsByTagName("node"):
+        u = utils.get_utilization_str(node_el)
+        if u:
+            utilization[node_el.getAttribute("uname")] = u
+    print("Node Utilization:")
+    for node in sorted(utilization):
+        print(" {0}: {1}".format(node, utilization[node]))
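
Based on the node_cmd dispatcher above, the new subcommands are driven roughly like this (illustrative command lines; see the updated pcs.8 below for the authoritative syntax):

    pcs node maintenance node1 node2            # put the listed nodes into maintenance mode
    pcs node maintenance --all                  # or every node in the cluster
    pcs node unmaintenance node1                # take a node out of maintenance mode
    pcs node utilization                        # show utilization attributes of all nodes
    pcs node utilization node1 cpu=4 ram=2048   # set utilization attributes on node1
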
diff --git a/pcs/pcs.8 b/pcs/pcs.8
index b3f2af8..1f15b18 100644
--- a/pcs/pcs.8
+++ b/pcs/pcs.8
@@ -1,4 +1,4 @@
-.TH PCS "8" "February 2015" "pcs 0.9.139" "System Administration Utilities"
+.TH PCS "8" "December 2015" "pcs 0.9.148" "System Administration Utilities"
 .SH NAME
 pcs \- pacemaker/corosync configuration system
 .SH SYNOPSIS
@@ -44,6 +44,12 @@ View cluster status
 .TP
 config
 View and manage cluster configuration
+.TP
+pcsd
+Manage pcs daemon
+.TP
+node
+Manage cluster nodes
 .SS "resource"
 .TP
 show [resource id] [\fB\-\-full\fR] [\fB\-\-groups\fR]
@@ -56,31 +62,45 @@ describe <standard:provider:type|type>
 Show options for the specified resource
 .TP
 create <resource id> <standard:provider:type|type> [resource options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...] [\fB\-\-clone\fR <clone options> | \fB\-\-master\fR <master options> | \fB\-\-group\fR <group name> [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>]] [\fB\-\-disabled\fR] [\fB\-\-wait\fR[=n]]
-Create specified resource.  If \fB\-\-clone\fR is used a clone resource is created if \fB\-\-master\fR is specified a master/slave resource is created.  If \fB\-\-group\fR is specified the resource is added to the group named.  You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group.  If \fB\-\-disabled\fR is specified the resource is not started automatically.  If \fB\-\-wait\fR is specified, [...]
+Create specified resource.  If \fB\-\-clone\fR is used a clone resource is created if \fB\-\-master\fR is specified a master/slave resource is created.  If \fB\-\-group\fR is specified the resource is added to the group named.  You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resource relatively to some resource already existing in the group.  If \fB\-\-disabled\fR is specified the resource is not started automatically.  If \fB\-\-wait\fR is specified, [...]
+
+Example: Create a new resource called 'VirtualIP' with IP address 192.168.0.99, netmask of 32, monitored every 30 seconds, on eth2: pcs resource create VirtualIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 nic=eth2 op monitor interval=30s
 .TP
 delete <resource id|group id|master id|clone id>
 Deletes the resource, group, master or clone (and all resources within the group/master/clone).
 .TP
 enable <resource id> [\fB\-\-wait\fR[=n]]
-Allow the cluster to start the resource. Depending on the rest of the configuration (constraints, options, failures, etc), the resource may remain stopped.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds (or resource timeout seconds) for the resource to start and then return 0 if the resource is started, or 1 if the resource has not yet started.
+Allow the cluster to start the resource. Depending on the rest of the configuration (constraints, options, failures, etc), the resource may remain stopped.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resource to start and then return 0 if the resource is started, or 1 if the resource has not yet started.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 disable <resource id> [\fB\-\-wait\fR[=n]]
-Attempt to stop the resource if it is running and forbid the cluster from starting it again.  Depending on the rest of the configuration (constraints, options, failures, etc), the resource may remain started.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds (or resource timeout seconds) for the resource to stop and then return 0 if the resource is stopped or 1 if the resource has not stopped.
+Attempt to stop the resource if it is running and forbid the cluster from starting it again.  Depending on the rest of the configuration (constraints, options, failures, etc), the resource may remain started.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resource to stop and then return 0 if the resource is stopped or 1 if the resource has not stopped.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 restart <resource id> [node] [\fB\-\-wait\fR=n]
 Restart the resource specified. If a node is specified and if the resource is a clone or master/slave it will be restarted only on the node specified.  If \fB\-\-wait\fR is specified, then we will wait up to 'n' seconds for the resource to be restarted and return 0 if the restart was successful or 1 if it was not.
 .TP
 debug\-start <resource id> [\fB\-\-full\fR]
-This command will force the specified resource to start on this node ignoring the cluster recommendations and print the output from starting the resource. Using \fB\-\-full\fR will give more detailed output. This is mainly used for debugging resources that fail to start.
+This command will force the specified resource to start on this node ignoring the cluster recommendations and print the output from starting the resource.  Using \fB\-\-full\fR will give more detailed output.  This is mainly used for debugging resources that fail to start.
+.TP
+debug\-stop <resource id> [\fB\-\-full\fR]
+This command will force the specified resource to stop on this node ignoring the cluster recommendations and print the output from stopping the resource.  Using \fB\-\-full\fR will give more detailed output.  This is mainly used for debugging resources that fail to stop.
+.TP
+debug\-promote <resource id> [\fB\-\-full\fR]
+This command will force the specified resource to be promoted on this node ignoring the cluster recommendations and print the output from promoting the resource.  Using \fB\-\-full\fR will give more detailed output.  This is mainly used for debugging resources that fail to promote.
+.TP
+debug\-demote <resource id> [\fB\-\-full\fR]
+This command will force the specified resource to be demoted on this node ignoring the cluster recommendations and print the output from demoting the resource.  Using \fB\-\-full\fR will give more detailed output.  This is mainly used for debugging resources that fail to demote.
+.TP
+debug\-monitor <resource id> [\fB\-\-full\fR]
+This command will force the specified resource to be monitored on this node ignoring the cluster recommendations and print the output from monitoring the resource.  Using \fB\-\-full\fR will give more detailed output.  This is mainly used for debugging resources that fail to be monitored.
 .TP
 move <resource id> [destination node] [\fB\-\-master\fR] [lifetime=<lifetime>] [\fB\-\-wait\fR[=n]]
-Move resource off current node (and optionally onto destination node).  If \fB\-\-master\fR is used the scope of the command is limited to the master role and you must use the master id (instead of the resource id).  If lifetime is not specified it defaults to infinite.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resource to start on destination node and then return 0 if the resource is started, or 1 if the resource has not yet started.  If 'n' is not specifi [...]
+Move the resource off the node it is currently running on by creating a \-INFINITY location constraint to ban the node.  If destination node is specified the resource will be moved to that node by creating an INFINITY location constraint to prefer the destination node.  If \fB\-\-master\fR is used the scope of the command is limited to the master role and you must use the master id (instead of the resource id).  If lifetime is specified then the constraint will expire after that time, ot [...]
 .TP
 ban <resource id> [node] [\fB\-\-master\fR] [lifetime=<lifetime>] [\fB\-\-wait\fR[=n]]
-Prevent the resource id specified from running on the node (or on the current node it is running on if no node is specified).  If \fB\-\-master\fR is used the scope of the command is limited to the master role and you must use the master id (instead of the resource id).  If lifetime is not specified it defaults to infinite.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resource to start on different node and then return 0 if the resource is started, or 1 if the [...]
+Prevent the resource id specified from running on the node (or on the current node it is running on if no node is specified) by creating a \-INFINITY location constraint.  If \fB\-\-master\fR is used the scope of the command is limited to the master role and you must use the master id (instead of the resource id).  If lifetime is specified then the constraint will expire after that time, otherwise it defaults to infinity and the constraint can be cleared manually with 'pcs resource clear [...]
 .TP
-clear <resource id> [node] [\fB\-\-master\fR] [\fB\-\-wait\fR=n]
-Remove constraints created by move and/or ban on the specified resource (and node if specified). If \fB\-\-master\fR is used the scope of the command is limited to the master role and you must use the master id (instead of the resource id).  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for resources to start / move depending on the effect of removing the constraints and then return 0 if resources are started on target nodes, or 1 if resources have not yet started / mov [...]
+clear <resource id> [node] [\fB\-\-master\fR] [\fB\-\-wait\fR[=n]]
+Remove constraints created by move and/or ban on the specified resource (and node if specified). If \fB\-\-master\fR is used the scope of the command is limited to the master role and you must use the master id (instead of the resource id).  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting and/or moving resources if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 standards
 List available resource agent standards supported by this installation. (OCF, LSB, etc.)
@@ -92,7 +112,7 @@ agents [standard[:provider]]
 List available agents optionally filtered by standard and provider
 .TP
 update <resource id> [resource options] [op [<operation action> <operation options>]...] [meta <meta operations>...] [\fB\-\-wait\fR[=n]]
-Add/Change options to specified resource, clone or multi\-state resource.  If an operation (op) is specified it will update the first found operation with the same action on the specified resource, if no operation with that action exists then a new operation will be created (WARNING: all current options on the update op will be reset if not specified). If you want to create multiple monitor operations you should use the add_operation & remove_operation commands.  If \fB\-\-wait\fR is spe [...]
+Add/Change options to specified resource, clone or multi\-state resource.  If an operation (op) is specified it will update the first found operation with the same action on the specified resource, if no operation with that action exists then a new operation will be created.  (WARNING: all existing options on the updated operation will be reset if not specified.)  If you want to create multiple monitor operations you should use the 'op add' & 'op remove' commands.  If \fB\-\-wait\fR is s [...]
 .TP
 op add <resource id> <operation action> [operation properties]
 Add operation for specified resource
@@ -107,27 +127,25 @@ op defaults [options]
 Set default values for operations, if no options are passed, lists currently configured defaults
 .TP
 meta <resource id | group id | master id | clone id> <meta options> [\fB\-\-wait\fR[=n]]
-Add specified options to the specified resource, group, master/slave or clone.  Meta options should be in the format of name=value, options may be removed by setting an option without a value.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the changes to take effect and then return 0 if the changes have been processed or 1 otherwise.  If 'n' is not specified, default resource timeout will be used.  Example: pcs resource meta TestResource failure\-timeout=50 stickiness=
+Add specified options to the specified resource, group, master/slave or clone.  Meta options should be in the format of name=value, options may be removed by setting an option without a value.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the changes to take effect and then return 0 if the changes have been processed or 1 otherwise.  If 'n' is not specified it defaults to 60 minutes.  Example: pcs resource meta TestResource failure\-timeout=50 stickiness=
 .TP
 group add <group name> <resource id> [resource id] ... [resource id] [\fB\-\-before\fR <resource id> | \fB\-\-after\fR <resource id>] [\fB\-\-wait\fR[=n]]
-Add the specified resource to the group, creating the group if it does not exist.  If the resource is present in another group it is moved to the new group.  You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resources relatively to some resource already existing in the group.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for resources to move depending on the effect of grouping and then return 0 if the resources are moved, or 1 if the [...]
+Add the specified resource to the group, creating the group if it does not exist.  If the resource is present in another group it is moved to the new group.  You can use \fB\-\-before\fR or \fB\-\-after\fR to specify the position of the added resources relatively to some resource already existing in the group.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error. [...]
 .TP
 group remove <group name> <resource id> [resource id] ... [resource id] [\fB\-\-wait\fR[=n]]
-Remove the specified resource(s) from the group, removing the group if it no resources remain.
-Remove the specified resource(s) from the group, removing the group if it no resources remain.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for specified resources to move depending of the effect of ungrouping and the return 0 if resources are moved to target nodes, or 1 if resources have not yet moved.  If 'n' is not specified, default resource timeout will be used.
+Remove the specified resource(s) from the group, removing the group if no resources remain.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 ungroup <group name> [resource id] ... [resource id] [\fB\-\-wait\fR[=n]]
-Remove the group (Note: this does not remove any resources from the cluster) or if resources are specified, remove the specified resources from the group
-Remove the group (Note: this does not remove any resources from the cluster) or if resources are specified, remove the specified resources from the group.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for specified resources (all group resources if no resource specified) to move depending of the effect of ungrouping and the return 0 if resources are moved to target nodes, or 1 if resources have not yet moved.  If 'n' is not specified, default resource timeout will be used.
+Remove the group (Note: this does not remove any resources from the cluster) or if resources are specified, remove the specified resources from the group.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including moving resources if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 clone <resource id | group id> [clone options]... [\fB\-\-wait\fR[=n]]
-Setup up the specified resource or group as a clone.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resource clones to start and then return 0 if the clones are started, or 1 if the clones has not yet started.  If 'n' is not specified, default resource timeout will be used.
+Set up the specified resource or group as a clone.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting clone instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 unclone <resource id | group name> [\fB\-\-wait\fR[=n]]
-Remove the clone which contains the specified group or resource (the resource or group will not be removed).  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resource clones to stop and then return 0 if the resource is running as one instance, or 1 if the resource clones has not yet stopped.  If 'n' is not specified, default resource timeout will be used.
+Remove the clone which contains the specified group or resource (the resource or group will not be removed).  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including stopping clone instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 .TP
 master [<master/slave name>] <resource id | group name> [options] [\fB\-\-wait\fR[=n]]
-Configure a resource or group as a multi\-state (master/slave) resource.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the resource to be promoted and then return 0 if the resource is promoted, or 1 if the resource has not yet been promoted.  If 'n' is not specified, default resource timeout will be used.  Note: to remove a master you must remove the resource/group it contains.
+Configure a resource or group as a multi\-state (master/slave) resource.  If \fB\-\-wait\fR is specified, pcs will wait up to 'n' seconds for the operation to finish (including starting and promoting resource instances if appropriate) and then return 0 on success or 1 on error.  If 'n' is not specified it defaults to 60 minutes.  Note: to remove a master you must remove the resource/group it contains.
 .TP
 manage <resource id> ... [resource n]
 Set resources listed to managed mode (default)
@@ -139,20 +157,35 @@ defaults [options]
 Set default values for resources, if no options are passed, lists currently configured defaults
 .TP
 cleanup [<resource id>]
-Cleans up the resource in the lrmd (useful to reset the resource status and failcount). This tells the cluster to forget the operation history of a resource and re-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If resource id is not specified then all resources/stonith devices will be cleaned up.
+Cleans up the resource in the lrmd (useful to reset the resource status and failcount). This tells the cluster to forget the operation history of a resource and re-detect its current state. This can be useful to purge knowledge of past failures that have since been resolved. If a resource id is not specified then all resources/stonith devices will be cleaned up.
 .TP
 failcount show <resource id> [node]
 Show current failcount for specified resource from all nodes or only on specified node
 .TP
 failcount reset <resource id> [node]
 Reset failcount for specified resource on all nodes or only on specified node. This tells the cluster to forget how many times a resource has failed in the past.  This may allow the resource to be started or moved to a more preferred location.
+.TP
+relocate dry-run [resource1] [resource2] ...
+The same as 'relocate run' but has no effect on the cluster.
+.TP
+relocate run [resource1] [resource2] ...
+Relocate specified resources to their preferred nodes.  If no resources are specified, relocate all resources.  This command calculates the preferred node for each resource while ignoring resource stickiness.  Then it creates location constraints which will cause the resources to move to their preferred nodes.  Once the resources have been moved the constraints are deleted automatically.  Note that the preferred node is calculated based on current cluster status, constraints, location of [...]
+.TP
+relocate show
+Display current status of resources and their optimal node ignoring resource stickiness.
+.TP
+relocate clear
+Remove all constraints created by the 'relocate run' command.
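+For example, run 'pcs resource relocate dry-run' first to preview what 'pcs resource relocate run' would do, and use 'pcs resource relocate clear' afterwards if any of the temporary constraints need to be removed by hand.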
+.TP
+utilization [<resource id> [<name>=<value> ...]]
+Add specified utilization options to specified resource. If resource is not specified, shows utilization of all resources. If utilization options are not specified, shows utilization of specified resource. Utilization options should be in the format name=value, value has to be an integer. Options may be removed by setting an option without a value. Example: pcs resource utilization TestResource cpu= ram=20
 .SS "cluster"
 .TP
-auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-local\fR] [\fB\-\-force\fR]
+auth [node] [...] [\fB\-u\fR username] [\fB\-p\fR password] [\fB\-\-force\fR] [\fB\-\-local\fR]
 Authenticate pcs to pcsd on nodes specified, or on all nodes configured in corosync.conf if no nodes are specified (authorization tokens are stored in ~/.pcs/tokens or /var/lib/pcsd/tokens for root).  By default all nodes are also authenticated to each other, using \fB\-\-local\fR only authenticates the local node (and does not authenticate the remote nodes with each other).  Using \fB\-\-force\fR forces re-authentication to occur.
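+For example, 'pcs cluster auth node1 node2' authenticates the local node to pcsd on node1 and node2 (and the nodes to each other), prompting for a username and password when \fB\-u\fR and \fB\-p\fR are not given.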
 .TP
-setup [\fB\-\-start\fR] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1-altaddr]> [node2[,node2-altaddr]] [..] [\fB\-\-transport\fR <udpu|udp>] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\fB\-\-broadcast1\fR]]]]  [...]
-Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu), \fB\-\-rrpmode\fR allows you to set the RRP mode of the system. Currently only 'passive' is supported or tested (using 'active' is not recommended). Th [...]
+setup [\fB\-\-start\fR] [\fB\-\-local\fR] [\fB\-\-enable\fR] \fB\-\-name\fR <cluster name> <node1[,node1-altaddr]> [node2[,node2-altaddr]] [..] [\fB\-\-transport\fR <udpu|udp>] [\fB\-\-rrpmode\fR active|passive] [\fB\-\-addr0\fR <addr/net> [[[\fB\-\-mcast0\fR <address>] [\fB\-\-mcastport0\fR <port>] [\fB\-\-ttl0\fR <ttl>]] | [\fB\-\-broadcast0\fR]] [\fB\-\-addr1\fR <addr/net> [[[\fB\-\-mcast1\fR <address>] [\fB\-\-mcastport1\fR <port>] [\fB\-\-ttl1\fR <ttl>]] | [\fB\-\-broadcast1\fR]]]]  [...]
+Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR will only perform changes on the local node, \fB\-\-start\fR will also start the cluster on the specified nodes, \fB\-\-enable\fR will enable corosync and pacemaker on node startup, \fB\-\-transport\fR allows specification of corosync transport (default: udpu; udp for CMAN clusters), \fB\-\-rrpmode\fR allows you to set the RRP mode of the system. Currently only 'passive' is supported or tested (using 'active'  [...]
 
 \fB\-\-ipv6\fR will configure corosync to use ipv6 (instead of ipv4)
 
@@ -171,8 +204,8 @@ Configure corosync and sync configuration out to listed nodes. \fB\-\-local\fR w
 
 Configuring Redundant Ring Protocol (RRP)
 
-When using udpu (the default) specifying nodes, specify the ring 0
-address first followed by a ',' and then the ring 1 address.
+When using udpu specifying nodes, specify the ring 0 address first
+followed by a ',' and then the ring 1 address.
 
 Example: pcs cluster setup \-\-name cname nodeA-0,nodeA-1 nodeB-0,nodeB-1
 
@@ -217,9 +250,6 @@ View current cluster status (an alias of 'pcs status cluster')
 pcsd\-status [node] [...]
 Get current status of pcsd on nodes specified, or on all nodes configured in corosync.conf if no nodes are specified
 .TP
-certkey <certificate file> <key file>
-Load custom certificate and key files for use in pcsd
-.TP
 sync
 Sync corosync configuration to all nodes found from current corosync.conf file (cluster.conf on systems running Corosync 1.x)
 .TP
@@ -230,10 +260,10 @@ cib [filename] [scope=<scope> | \fB\-\-config\fR]
 Get the raw xml from the CIB (Cluster Information Base).  If a filename is provided, we save the cib to that file, otherwise the cib is printed.  Specify scope to get a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults, status.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to get the whole CIB or be warned in t [...]
 .TP
 cib-push <filename> [scope=<scope> | \fB\-\-config\fR]
-Push the raw xml from <filename> to the CIB (Cluster Information Base).  Specify scope to push a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to push the whole CIB or be warned in the case of outdated CIB.
+Push the raw xml from <filename> to the CIB (Cluster Information Base).  You can obtain the CIB by running the 'pcs cluster cib' command, which is the recommended first step when you want to perform desired modifications (pcs \fB\-f\fR <command>) for the one-off push.  Specify scope to push a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  U [...]
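+For example (the file name is arbitrary): 'pcs cluster cib my_changes.xml' saves the CIB, 'pcs \fB\-f\fR my_changes.xml <command>' modifies the saved copy, and 'pcs cluster cib\-push my_changes.xml \fB\-\-config\fR' pushes the modified configuration back to the cluster.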
 .TP
 cib\-upgrade
-Upgrade the cib to the latest version
+Upgrade the CIB to conform to the latest version of the document schema
 .TP
 edit [scope=<scope> | \fB\-\-config\fR]
 Edit the cib in the editor specified by the $EDITOR environment variable and push out any changes upon saving.  Specify scope to edit a specific section of the CIB.  Valid values of the scope are: configuration, nodes, resources, constraints, crm_config, rsc_defaults, op_defaults.  \fB\-\-config\fR is the same as scope=configuration.  Use of \fB\-\-config\fR is recommended.  Do not specify a scope if you need to edit the whole CIB or be warned in the case of outdated CIB.
@@ -263,22 +293,22 @@ destroy [\fB\-\-all\fR]
 Permanently destroy the cluster on the current node, killing all corosync/pacemaker processes, removing all cib files and the corosync.conf file.  Using \fB\-\-all\fR will attempt to destroy the cluster on all nodes configured in the corosync.conf file.  WARNING: This command permanently removes any cluster configuration that has been created. It is recommended to run 'pcs cluster stop' before destroying the cluster.
 .TP
 verify [\fB\-V\fR] [filename]
-Checks the pacemaker configuration (cib) for syntax and common conceptual errors.  If no filename is specified the check is performmed on the currently running cluster.  If \fB\-V\fR is used more verbose output will be printed
+Checks the pacemaker configuration (cib) for syntax and common conceptual errors.  If no filename is specified the check is performed on the currently running cluster.  If \fB\-V\fR is used more verbose output will be printed
 .TP
 report [\fB\-\-from\fR "YYYY\-M\-D H:M:S" [\fB\-\-to\fR "YYYY\-M\-D H:M:S"]] dest
-Create a tarball containing everything needed when reporting cluster problems.  If \fB\-\-from\fR and \fB\-\-to\fR are not used, the report will include the past 24 hours
+Create a tarball containing everything needed when reporting cluster problems.  If \fB\-\-from\fR and \fB\-\-to\fR are not used, the report will include the past 24 hours.
 .SS "stonith"
 .TP
 show [stonith id] [\fB\-\-full\fR]
 Show all currently configured stonith devices or if a stonith id is specified show the options for the configured stonith device.  If \fB\-\-full\fR is specified all configured stonith options will be displayed
 .TP
 list [filter] [\fB\-\-nodesc\fR]
-Show list of all available stonith agents (if filter is provided then only stonith agents matching the filter will be shown). If \fB\-\-nodesc\fR is used then descriptions of stontih agents are not printed.
+Show list of all available stonith agents (if filter is provided then only stonith agents matching the filter will be shown). If \fB\-\-nodesc\fR is used then descriptions of stonith agents are not printed.
 .TP
 describe <stonith agent>
 Show options for specified stonith agent
 .TP
-create <stonith id> <stonith device type> [stonith device options]
+create <stonith id> <stonith device type> [stonith device options] [op <operation action> <operation options> [<operation action> <operation options>]...] [meta <meta options>...]
 Create stonith device with specified type and options
 .TP
 update <stonith id> [stonith device options]
@@ -309,7 +339,9 @@ fence <node> [\fB\-\-off\fR]
 Fence the node specified (if \fB\-\-off\fR is specified, use the 'off' API call to stonith which will turn the node off instead of rebooting it)
 .TP
 confirm <node>
-Confirm that the host specified is currently down.  WARNING: if this node is not actually down data corruption/cluster failure can occur.
+Confirm that the host specified is currently down.  This command should \fBONLY\fR be used when the node specified has already been confirmed to be down.
+
+.B WARNING: if this node is not actually down, data corruption/cluster failure can occur.
 .SS "acl"
 .TP
 [show]
@@ -356,13 +388,13 @@ Remove the permission id specified (permission id's are listed in parenthesis af
 .SS "property"
 .TP
 list|show [<property> | \fB\-\-all\fR | \fB\-\-defaults\fR]
-List property settings (default: lists configured properties).  If \fB\-\-defaults\fR is specified will show all property defaults, if \fB\-\-all\fR is specified, current configured properties will be shown with unset properties and their defaults.
+List property settings (default: lists configured properties).  If \fB\-\-defaults\fR is specified, all property defaults will be shown; if \fB\-\-all\fR is specified, currently configured properties will be shown together with unset properties and their defaults.  Run 'man pengine' and 'man crmd' to get a description of the properties.
 .TP
 set [\fB\-\-force\fR] [\fB\-\-node\fR <nodename>] <property>=[<value>]
-Set specific pacemaker properties (if the value is blank then the property is removed from the configuration).  If a property is not recognized by pcs the property will not be created unless the \fB\-\-force\fR is used. If \fB\-\-node\fR is used a node attribute is set on the specified node.
+Set specific pacemaker properties (if the value is blank then the property is removed from the configuration).  If a property is not recognized by pcs the property will not be created unless \fB\-\-force\fR is used. If \fB\-\-node\fR is used a node attribute is set on the specified node.  Run 'man pengine' and 'man crmd' to get a description of the properties.
 .TP
 unset [\fB\-\-node\fR <nodename>] <property>
-Remove property from configuration (or remove attribute from specified node if \fB\-\-node\fR is used).
+Remove property from configuration (or remove attribute from specified node if \fB\-\-node\fR is used).  Run 'man pengine' and 'man crmd' to get a description of the properties.
 .SS "constraint"
 .TP
 [list|show] \fB\-\-full\fR
@@ -408,7 +440,7 @@ order show [\fB\-\-full\fR]
 List all current ordering constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
 .TP
 order [action] <resource id> then [action] <resource id> [options]
-Add an ordering constraint specifying actions (start, stop, promote, demote) and if no action is specified the default action will be start. Available options are kind=Optional/Mandatory/Serialize, symmetrical=true/false and id=<constraint-id>.
+Add an ordering constraint specifying actions (start, stop, promote, demote) and if no action is specified the default action will be start.  Available options are kind=Optional/Mandatory/Serialize, symmetrical=true/false, require-all=true/false and id=<constraint-id>.
 .TP
 order set <resource1> <resource2> [resourceN]... [options] [set <resourceX> <resourceY> ... [options]] [setoptions [constraint_options]]
 Create an ordered set of resources. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave.  Available constraint_options are id=<constraint-id>, kind=Optional/Mandatory/Serialize and symmetrical=true/false.
@@ -420,7 +452,7 @@ colocation show [\fB\-\-full\fR]
 List all current colocation constraints (if \fB\-\-full\fR is specified show the internal constraint id's as well).
 .TP
 colocation add [master|slave] <source resource id> with [master|slave] <target resource id> [score] [options] [id=constraint-id]
-Request <source resource> to run on the same node where pacemaker has determined <target resource> should run.  Positive values of score mean the resources should be run on the same node, negative values mean the resources should not be run on the same node.  Specifying 'INFINITY' (or '\-INFINITY') for the score force <source resource> to run (or not run) with <target resource>. (score defaults to "INFINITY") A role can be master or slave (if no role is specified, it defaults to 'started').
+Request <source resource> to run on the same node where pacemaker has determined <target resource> should run.  Positive values of score mean the resources should be run on the same node, negative values mean the resources should not be run on the same node.  Specifying 'INFINITY' (or '\-INFINITY') for the score forces <source resource> to run (or not run) with <target resource> (score defaults to "INFINITY").  A role can be master or slave (if no role is specified, it defaults to 'started').
 .TP
 colocation set <resource1> <resource2> [resourceN]... [options] [set <resourceX> <resourceY> ... [options]] [setoptions [constraint_options]]
 Create a colocation constraint with a resource set. Available options are sequential=true/false, require-all=true/false, action=start/promote/demote/stop and role=Stopped/Started/Master/Slave. Available constraint_options are id, score, score-attribute and score-attribute-mangle.
@@ -460,7 +492,7 @@ Remove a rule if a rule id is specified, if rule is last rule in its constraint,
 .SS "status"
 .TP
 [status] [\fB\-\-full\fR]
-View all information about the cluster and resources (--full provides more details)
+View all information about the cluster and resources (\fB\-\-full\fR provides more details)
 .TP
 resources
 View current status of cluster resources
@@ -502,8 +534,34 @@ Show specified configuration checkpoint.
 checkpoint restore <checkpoint_number>
 Restore cluster configuration to specified checkpoint.
 .TP
-import-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output-format=corosync.conf|cluster.conf]
+import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] [output\-format=corosync.conf|cluster.conf]
 Converts CMAN cluster configuration to Pacemaker cluster configuration.  Converted configuration will be saved to 'output' file.  To send the configuration to the cluster nodes the 'pcs config restore' command can be used.  If \fB\-\-interactive\fR is specified you will be prompted to solve incompatibilities manually.  If no input is specified /etc/cluster/cluster.conf will be used.  You can force the creation of output containing either cluster.conf or corosync.conf using the output-format option.
+.TP
+import\-cman output=<filename> [input=<filename>] [\fB\-\-interactive\fR] output\-format=pcs-commands|pcs-commands-verbose
+Converts CMAN cluster configuration to a list of pcs commands which, when executed, recreate the same cluster as a Pacemaker cluster.  Commands will be saved to 'output' file.  For other options see above.
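+Example (the output file name is illustrative): pcs config import\-cman output=cluster.pcs_commands output\-format=pcs-commands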
+.TP
+export pcs\-commands|pcs\-commands\-verbose output=<filename>
+Creates a list of pcs commands which upon execution recreates the current cluster running on this node.  Commands will be saved to 'output' file.  Use pcs\-commands to get a simple list of commands, whereas pcs\-commands\-verbose creates a list including comments and debug messages.
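+Example (the output file name is arbitrary): pcs config export pcs\-commands output=my_cluster.commands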
+.SS "pcsd"
+.TP
+certkey <certificate file> <key file>
+Load custom certificate and key files for use in pcsd.
+.TP
+sync-certificates
+Sync pcsd certificates to all nodes found from current corosync.conf file (cluster.conf on systems running Corosync 1.x).  WARNING: This will restart the pcsd daemon on the nodes.
+.TP
+clear-auth [\fB\-\-local\fR] [\fB\-\-remote\fR]
+Removes all system tokens which allow pcs/pcsd on the current system to authenticate with remote pcs/pcsd instances and vice\-versa.  After this command is run this node will need to be re\-authenticated with other nodes (using 'pcs cluster auth').  Using \fB\-\-local\fR only removes tokens used by local pcs (and pcsd if root) to connect to other pcsd instances, using \fB\-\-remote\fR clears authentication tokens used by remote systems to connect to the local pcsd instance.
+.SS "node"
+.TP
+maintenance [\fB\-\-all\fR] | [node]...
+Put specified node(s) into maintenance mode, if no node or options are specified the current node will be put into maintenance mode, if \fB\-\-all\fR is specified all nodes will be put into maintenance mode.
+.TP
+unmaintenance [\fB\-\-all\fR] | [node]...
+Remove node(s) from maintenance mode, if no node or options are specified the current node will be removed from maintenance mode, if \fB\-\-all\fR is specified all nodes will be removed from maintenance mode.
+.TP
+utilization [<node> [<name>=<value> ...]]
+Add specified utilization options to specified node. If node is not specified, shows utilization of all nodes. If utilization options are not specified, shows utilization of specified node. Utilization options should be in the format name=value, value has to be an integer. Options may be removed by setting an option without a value. Example: pcs node utilization node1 cpu=4 ram=
 .SH EXAMPLES
 .TP
 Show all resources
diff --git a/pcs/pcs.py b/pcs/pcs.py
index 69581c9..f41e334 100755
--- a/pcs/pcs.py
+++ b/pcs/pcs.py
@@ -1,6 +1,14 @@
 #!/usr/bin/python
 
-import sys, getopt, os
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import sys
+import os
+import getopt
+
 import usage
 import cluster
 import resource
@@ -12,12 +20,16 @@ import utils
 import status
 import settings
 import config
+import pcsd
+import node
+
 
 usefile = False
 filename = ""
 def main(argv):
     utils.subprocess_setup()
     global filename, usefile
+    orig_argv = argv[:]
     utils.pcs_options = {}
     modified_argv = []
     real_argv = []
@@ -46,7 +58,10 @@ def main(argv):
                     arg = "--wait"
             new_argv.append(arg)
         argv = new_argv
-                    
+
+        # h = help, f = file,
+        # p = password (cluster auth), u = user (cluster auth),
+        # V = verbose (cluster verify)
         pcs_short_options = "hf:p:u:V"
         pcs_short_options_with_args = []
         for c in pcs_short_options:
@@ -71,6 +86,7 @@ def main(argv):
             "token=", "token_coefficient=", "consensus=", "join=",
             "miss_count_const=", "fail_recv_const=",
             "corosync_conf=", "cluster_conf=",
+            "remote",
         ]
         # pull out negative number arguments and add them back after getopt
         prev_arg = ""
@@ -89,8 +105,8 @@ def main(argv):
             prev_arg = arg
 
         pcs_options, argv = getopt.gnu_getopt(modified_argv, pcs_short_options, pcs_long_options)
-    except getopt.GetoptError, err:
-        print err
+    except getopt.GetoptError as err:
+        print(err)
         usage.main()
         sys.exit(1)
     argv = real_argv
@@ -116,7 +132,7 @@ def main(argv):
         elif o == "--cluster_conf":
             settings.cluster_conf_file = a
         elif o == "--version":
-            print settings.pcs_version
+            print(settings.pcs_version)
             sys.exit()
         elif o == "--fullhelp":
             usage.full_usage()
@@ -131,25 +147,83 @@ def main(argv):
     command = argv.pop(0)
     if (command == "-h" or command == "help"):
         usage.main()
-    elif (command == "resource"):
-        resource.resource_cmd(argv)
-    elif (command == "cluster"):
-        cluster.cluster_cmd(argv)
-    elif (command == "stonith"):
-        stonith.stonith_cmd(argv)
-    elif (command == "property"):
-        prop.property_cmd(argv)
-    elif (command == "constraint"):
-        constraint.constraint_cmd(argv)
-    elif (command == "acl"):
-        acl.acl_cmd(argv)
-    elif (command == "status"):
-        status.status_cmd(argv)
-    elif (command == "config"):
-        config.config_cmd(argv)
-    else:
+        return
+    cmd_map = {
+        "resource": resource.resource_cmd,
+        "cluster": cluster.cluster_cmd,
+        "stonith": stonith.stonith_cmd,
+        "property": prop.property_cmd,
+        "constraint": constraint.constraint_cmd,
+        "acl": acl.acl_cmd,
+        "status": status.status_cmd,
+        "config": config.config_cmd,
+        "pcsd": pcsd.pcsd_cmd,
+        "node": node.node_cmd,
+    }
+    if command not in cmd_map:
         usage.main()
         sys.exit(1)
+    # root can run everything directly, help can also be displayed, and
+    # working on a local file does not require running under root either
+    if (os.getuid() == 0) or (argv and argv[0] == "help") or usefile:
+        cmd_map[command](argv)
+        return
+    # specific commands need to be run under root account, pass them to pcsd
+    # don't forget to allow each command in pcsd.rb in "post /run_pcs do"
+    root_command_list = [
+        ['cluster', 'auth', '...'],
+        ['cluster', 'corosync', '...'],
+        ['cluster', 'destroy', '...'],
+        ['cluster', 'disable', '...'],
+        ['cluster', 'enable', '...'],
+        ['cluster', 'node', '...'],
+        ['cluster', 'pcsd-status', '...'],
+        ['cluster', 'setup', '...'],
+        ['cluster', 'start', '...'],
+        ['cluster', 'stop', '...'],
+        ['cluster', 'sync', '...'],
+        # ['config', 'restore', '...'], # handled in config.config_restore
+        ['pcsd', 'sync-certificates'],
+        ['status', 'nodes', 'corosync-id'],
+        ['status', 'nodes', 'pacemaker-id'],
+        ['status', 'pcsd', '...'],
+    ]
+    argv_cmd = argv[:]
+    argv_cmd.insert(0, command)
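+    # an entry ending with '...' matches that command prefix followed by any
+    # further arguments; other entries must match the full command line exactly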
+    for root_cmd in root_command_list:
+        if (
+            (argv_cmd == root_cmd)
+            or
+            (
+                root_cmd[-1] == "..."
+                and
+                argv_cmd[:len(root_cmd)-1] == root_cmd[:-1]
+            )
+        ):
+            # handle interactivity of 'pcs cluster auth'
+            if argv_cmd[0:2] == ["cluster", "auth"]:
+                if "-u" not in utils.pcs_options:
+                    username = utils.get_terminal_input('Username: ')
+                    orig_argv.extend(["-u", username])
+                if "-p" not in utils.pcs_options:
+                    password = utils.get_terminal_password()
+                    orig_argv.extend(["-p", password])
+
+            # call the local pcsd
+            err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd(
+                orig_argv, True
+            )
+            if err_msgs:
+                for msg in err_msgs:
+                    utils.err(msg, False)
+                sys.exit(1)
+            if std_out.strip():
+                print(std_out)
+            if std_err.strip():
+                sys.stderr.write(std_err)
+            sys.exit(exitcode)
+            return
+    cmd_map[command](argv)
 
 if __name__ == "__main__":
   main(sys.argv[1:])
diff --git a/pcs/pcsd.py b/pcs/pcsd.py
new file mode 100644
index 0000000..1a83a03
--- /dev/null
+++ b/pcs/pcsd.py
@@ -0,0 +1,173 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import sys
+import os
+import errno
+import json
+
+import usage
+import utils
+import settings
+
+
+def pcsd_cmd(argv):
+    if len(argv) == 0:
+        usage.pcsd()
+        sys.exit(1)
+
+    sub_cmd = argv.pop(0)
+    if sub_cmd == "help":
+        usage.pcsd(argv)
+    elif sub_cmd == "certkey":
+        pcsd_certkey(argv)
+    elif sub_cmd == "sync-certificates":
+        pcsd_sync_certs(argv)
+    elif sub_cmd == "clear-auth":
+        pcsd_clear_auth(argv)
+    else:
+        usage.pcsd()
+        sys.exit(1)
+
+def pcsd_certkey(argv):
+    if len(argv) != 2:
+        usage.pcsd(["certkey"])
+        exit(1)
+
+    certfile = argv[0]
+    keyfile = argv[1]
+
+    try:
+        with open(certfile, 'r') as myfile:
+            cert = myfile.read()
+        with open(keyfile, 'r') as myfile:
+            key = myfile.read()
+    except IOError as e:
+        utils.err(e)
+    errors = utils.verify_cert_key_pair(cert, key)
+    if errors:
+        for err in errors:
+            utils.err(err, False)
+        sys.exit(1)
+
+    if not "--force" in utils.pcs_options and (os.path.exists(settings.pcsd_cert_location) or os.path.exists(settings.pcsd_key_location)):
+        utils.err("certificate and/or key already exists, your must use --force to overwrite")
+
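+    # tighten permissions on any pre-existing cert/key files before rewriting
+    # them, then create the new files with mode 0700 so the key stays owner-only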
+    try:
+        try:
+            os.chmod(settings.pcsd_cert_location, 0o700)
+        except OSError: # If the file doesn't exist, we don't care
+            pass
+
+        try:
+            os.chmod(settings.pcsd_key_location, 0o700)
+        except OSError: # If the file doesn't exist, we don't care
+            pass
+
+        with os.fdopen(os.open(settings.pcsd_cert_location, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o700), 'w') as myfile:
+            myfile.write(cert)
+
+        with os.fdopen(os.open(settings.pcsd_key_location, os.O_WRONLY | os.O_CREAT | os.O_TRUNC, 0o700), 'w') as myfile:
+            myfile.write(key)
+
+    except IOError as e:
+        utils.err(e)
+
+    print("Certificate and key updated, you may need to restart pcsd (service pcsd restart) for new settings to take effect")
+
+def pcsd_sync_certs(argv, exit_after_error=True):
+    error = False
+    nodes_sync = argv if argv else utils.getNodesFromCorosyncConf()
+    nodes_restart = []
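+    # the certificates are first pushed to all requested nodes; pcsd is then
+    # restarted only on the nodes where the sync reported success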
+
+    print("Synchronizing pcsd certificates on nodes {0}...".format(
+        ", ".join(nodes_sync)
+    ))
+    pcsd_data = {
+        "nodes": nodes_sync,
+    }
+    output, retval = utils.run_pcsdcli("send_local_certs", pcsd_data)
+    if retval == 0 and output["status"] == "ok" and output["data"]:
+        try:
+            sync_result = output["data"]
+            if sync_result["node_status"]:
+                for node, status in sync_result["node_status"].items():
+                    print("{0}: {1}".format(node, status["text"]))
+                    if status["status"] == "ok":
+                        nodes_restart.append(node)
+                    else:
+                        error = True
+            if sync_result["status"] != "ok":
+                error = True
+                utils.err(sync_result["text"], False)
+            if error and not nodes_restart:
+                if exit_after_error:
+                    sys.exit(1)
+                else:
+                    return
+            print()
+        except (KeyError, AttributeError):
+            utils.err("Unable to communicate with pcsd", exit_after_error)
+            return
+    else:
+        utils.err("Unable to sync pcsd certificates", exit_after_error)
+        return
+
+    print("Restarting pcsd on the nodes in order to reload the certificates...")
+    pcsd_data = {
+        "nodes": nodes_restart,
+    }
+    output, retval = utils.run_pcsdcli("pcsd_restart_nodes", pcsd_data)
+    if retval == 0 and output["status"] == "ok" and output["data"]:
+        try:
+            restart_result = output["data"]
+            if restart_result["node_status"]:
+                for node, status in restart_result["node_status"].items():
+                    print("{0}: {1}".format(node, status["text"]))
+                    if status["status"] != "ok":
+                        error = True
+            if restart_result["status"] != "ok":
+                error = True
+                utils.err(restart_result["text"], False)
+            if error:
+                if exit_after_error:
+                    sys.exit(1)
+                else:
+                    return
+        except (KeyError, AttributeError):
+            utils.err("Unable to communicate with pcsd", exit_after_error)
+            return
+    else:
+        utils.err("Unable to restart pcsd", exit_after_error)
+        return
+
+def pcsd_clear_auth(argv):
+    output = []
+    files = []
+    if os.geteuid() == 0:
+        pcsd_tokens_file = settings.pcsd_tokens_location
+    else:
+        pcsd_tokens_file = os.path.expanduser("~/.pcs/tokens")
+
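+    # --local drops the tokens this system uses to reach other pcsd instances,
+    # --remote drops the credentials remote systems use to reach the local pcsd;
+    # with neither option given, both files are removed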
+    if '--local' in utils.pcs_options:
+        files.append(pcsd_tokens_file)
+    if '--remote' in utils.pcs_options:
+        files.append(settings.pcsd_users_conf_location)
+
+    if len(files) == 0:
+        files.append(pcsd_tokens_file)
+        files.append(settings.pcsd_users_conf_location)
+
+    for f in files:
+        try:
+            os.remove(f)
+        except OSError as e:
+            if (e.errno != errno.ENOENT):
+                output.append(e.strerror + " (" + f + ")")
+
+    if len(output) > 0:
+        for o in output:
+            print("Error: " + o)
+        sys.exit(1)
diff --git a/pcs/prop.py b/pcs/prop.py
index 3b1c15a..592e4d8 100644
--- a/pcs/prop.py
+++ b/pcs/prop.py
@@ -1,10 +1,15 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import sys
+from xml.dom.minidom import parseString
+import xml.etree.ElementTree as ET
+
 import usage
 import utils
 import settings
-import xml.dom.minidom
-from xml.dom.minidom import parseString
-import xml.etree.ElementTree as ET
 
 def property_cmd(argv):
     if len(argv) == 0:
@@ -27,14 +32,13 @@ def set_property(argv):
     for arg in argv:
         args = arg.split('=')
         if (len(args) != 2):
-            print "Invalid Property: " + arg
+            print("Invalid Property: " + arg)
             continue
         if "--node" in utils.pcs_options:
             utils.set_node_attribute(args[0], args[1], utils.pcs_options["--node"])
         elif ("--force" in utils.pcs_options) or utils.is_valid_property(args[0]):
-            id_valid, id_error = utils.validate_xml_id(args[0], 'property name')
-            if not id_valid:
-                utils.err(id_error)
+            if not args[0]:
+                utils.err("property name cannot be empty")
             utils.set_cib_property(args[0],args[1])
         else:
             utils.err("unknown cluster property: '%s', (use --force to override)" % args[0])
@@ -69,18 +73,18 @@ def list_property(argv):
             properties
         )
 
-    print "Cluster Properties:"
-    for prop,val in sorted(properties.iteritems()):
-        print " " + prop + ": " + val
+    print("Cluster Properties:")
+    for prop,val in sorted(properties.items()):
+        print(" " + prop + ": " + val)
 
     node_attributes = utils.get_node_attributes()
     if node_attributes:
-        print "Node Attributes:"
+        print("Node Attributes:")
         for node in sorted(node_attributes):
-            print " " + node + ":",
+            line_parts = [" " + node + ":"]
             for attr in node_attributes[node]:
-                print attr,
-            print
+                line_parts.append(attr)
+            print(" ".join(line_parts))
 
 def get_default_properties():
     (output, retVal) = utils.run([settings.pengine_binary, "metadata"])
diff --git a/pcs/resource.py b/pcs/resource.py
index a63b691..89e7ac9 100644
--- a/pcs/resource.py
+++ b/pcs/resource.py
@@ -1,16 +1,25 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import sys
 import os
-import time
 import xml.dom.minidom
 from xml.dom.minidom import getDOMImplementation
 from xml.dom.minidom import parseString
-import usage
-import utils
 import re
 import textwrap
-import xml.etree.ElementTree as ET
-import tempfile
+import time
+
+import usage
+import utils
 import constraint
+import stonith
+
+
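+# exit status of 'crm_resource --wait' when it gives up waiting (checked below)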
+PACEMAKER_WAIT_TIMEOUT_STATUS = 62
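+# id prefix of the temporary location constraints created by 'pcs resource
+# relocate run', so that 'pcs resource relocate clear' can find and remove them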
+RESOURCE_RELOCATE_CONSTRAINT_PREFIX = "pcs-relocate-"
 
 def resource_cmd(argv):
     if len(argv) == 0:
@@ -90,7 +99,15 @@ def resource_cmd(argv):
     elif (sub_cmd == "restart"):
         resource_restart(argv)
     elif (sub_cmd == "debug-start"):
-        resource_force_start(argv)
+        resource_force_action(sub_cmd, argv)
+    elif (sub_cmd == "debug-stop"):
+        resource_force_action(sub_cmd, argv)
+    elif (sub_cmd == "debug-promote"):
+        resource_force_action(sub_cmd, argv)
+    elif (sub_cmd == "debug-demote"):
+        resource_force_action(sub_cmd, argv)
+    elif (sub_cmd == "debug-monitor"):
+        resource_force_action(sub_cmd, argv)
     elif (sub_cmd == "manage"):
         resource_manage(argv, True)
     elif (sub_cmd == "unmanage"):
@@ -136,6 +153,15 @@ def resource_cmd(argv):
             resource_cleanup(res_id)
     elif (sub_cmd == "history"):
         resource_history(argv)
+    elif (sub_cmd == "relocate"):
+        resource_relocate(argv)
+    elif (sub_cmd == "utilization"):
+        if len(argv) == 0:
+            print_resources_utilization()
+        elif len(argv) == 1:
+            print_resource_utilization(argv.pop(0))
+        else:
+            set_resource_utilization(argv.pop(0), argv)
     else:
         usage.resource()
         sys.exit(1)
@@ -185,13 +211,28 @@ def parse_resource_options(argv, with_clone=False):
 # List available resources
 # TODO make location more easily configurable
 def resource_list_available(argv):
-    ret = ""
+    def get_name_and_desc(full_res_name, metadata):
+        sd = ""
+        try:
+            dom = parseString(metadata)
+            shortdesc = dom.documentElement.getElementsByTagName("shortdesc")
+            if len(shortdesc) > 0:
+                sd = " - " +  format_desc(
+                    len(full_res_name + " - "),
+                    shortdesc[0].firstChild.nodeValue.strip().replace("\n", " ")
+                )
+        except xml.parsers.expat.ExpatError:
+            sd = ""
+        finally:
+            return full_res_name + sd
+
+    ret = []
     if len(argv) != 0:
         filter_string = argv[0]
     else:
         filter_string = ""
 
-# ocf agents
+    # ocf agents
     os.environ['OCF_ROOT'] = "/usr/lib/ocf/"
     providers = sorted(os.listdir("/usr/lib/ocf/resource.d"))
     for provider in providers:
@@ -204,118 +245,168 @@ def resource_list_available(argv):
                 continue
 
             if "--nodesc" in utils.pcs_options:
-                ret += full_res_name + "\n"
+                ret.append(full_res_name)
                 continue
 
             metadata = utils.get_metadata("/usr/lib/ocf/resource.d/" + provider + "/" + resource)
             if metadata == False:
                 continue
-            sd = ""
-            try:
-                dom = parseString(metadata)
-                shortdesc = dom.documentElement.getElementsByTagName("shortdesc")
-                if len(shortdesc) > 0:
-                    sd = " - " +  format_desc(full_res_name.__len__() + 3, shortdesc[0].firstChild.nodeValue.strip().replace("\n", " "))
-            except xml.parsers.expat.ExpatError:
-                sd = ""
-            finally:
-                ret += full_res_name + sd + "\n"
-# lsb agents
+            ret.append(get_name_and_desc(
+                "ocf:" + provider + ":" + resource,
+                metadata
+            ))
+
+    # lsb agents
     lsb_dir = "/etc/init.d/"
     agents = sorted(os.listdir(lsb_dir))
     for agent in agents:
         if os.access(lsb_dir + agent, os.X_OK):
-            ret += "lsb:" + agent + "\n"
-# systemd agents
+            ret.append("lsb:" + agent)
+
+    # systemd agents
     if utils.is_systemctl():
         agents, retval = utils.run(["systemctl", "list-unit-files", "--full"])
         agents = agents.split("\n")
-
     for agent in agents:
         match = re.search(r'^([\S]*)\.service',agent)
         if match:
-            ret += "systemd:" + match.group(1) + "\n"
+            ret.append("systemd:" + match.group(1))
 
+    # nagios metadata
+    nagios_metadata_path = "/usr/share/pacemaker/nagios/plugins-metadata"
+    if os.path.isdir(nagios_metadata_path):
+        for metadata_file in sorted(os.listdir(nagios_metadata_path)):
+            if metadata_file.startswith("."):
+                continue
+            full_res_name = "nagios:" + metadata_file
+            if full_res_name.lower().endswith(".xml"):
+                full_res_name = full_res_name[:-len(".xml")]
+            if "--nodesc" in utils.pcs_options:
+                ret.append(full_res_name)
+                continue
+            try:
+                ret.append(get_name_and_desc(
+                    full_res_name,
+                    open(
+                        os.path.join(nagios_metadata_path, metadata_file),
+                        "r"
+                    ).read()
+                ))
+            except EnvironmentError as e:
+                pass
+
+    # output
     if not ret:
         utils.err(
             "No resource agents available. "
             "Do you have resource agents installed?"
         )
     if filter_string != "":
-        rlines = ret.split("\n")
         found = False
-        for rline in rlines:
+        for rline in ret:
             if rline.lower().find(filter_string.lower()) != -1:
-                print rline
+                print(rline)
                 found = True
         if not found:
             utils.err("No resource agents matching the filter.")
     else:
-        print ret,
+        print("\n".join(ret))
+
+def resource_parse_options(metadata, standard, provider, resource):
+    try:
+        short_desc = ""
+        long_desc = ""
+        dom = parseString(metadata)
+        long_descs = dom.documentElement.getElementsByTagName("longdesc")
+        for ld in long_descs:
+            if ld.parentNode.tagName == "resource-agent" and ld.firstChild:
+                long_desc = ld.firstChild.data.strip()
+                break
 
+        short_descs = dom.documentElement.getElementsByTagName("shortdesc")
+        for sd in short_descs:
+            if sd.parentNode.tagName == "resource-agent" and sd.firstChild:
+                short_desc = sd.firstChild.data.strip()
+                break
+
+        if provider:
+            title_1 = "%s:%s:%s" % (standard, provider, resource)
+        else:
+            title_1 = "%s:%s" % (standard, resource)
+
+        if short_desc:
+            title_1 += " - " + format_desc(len(title_1 + " - "), short_desc)
+        print(title_1)
+        print()
+        if long_desc:
+            print(long_desc)
+            print()
+
+        params = dom.documentElement.getElementsByTagName("parameter")
+        if len(params) > 0:
+            print("Resource options:")
+        for param in params:
+            name = param.getAttribute("name")
+            if param.getAttribute("required") == "1":
+                name += " (required)"
+            desc = ""
+            longdesc_els = param.getElementsByTagName("longdesc")
+            if longdesc_els and longdesc_els[0].firstChild:
+                desc = longdesc_els[0].firstChild.nodeValue.strip().replace("\n", " ")
+            if not desc:
+                desc = "No description available"
+            indent = name.__len__() + 4
+            desc = format_desc(indent, desc)
+            print("  " + name + ": " + desc)
+    except xml.parsers.expat.ExpatError as e:
+        utils.err("Unable to parse xml for '%s': %s" % (resource, e))
 
 def resource_list_options(resource):
     found_resource = False
     resource = get_full_ra_type(resource,True)
+
+    # we know this is the nagios resource standard
+    if "nagios:" in resource:
+        resource_split = resource.split(":",2)
+        resource = resource_split[1]
+        standard = "nagios"
+        try:
+            with open("/usr/share/pacemaker/nagios/plugins-metadata/" + resource + ".xml",'r') as f:
+                resource_parse_options(f.read(), standard, None, resource)
+        except IOError as e:
+            utils.err ("Unable to find resource: %s" % resource)
+        return
+
+    # we know this is the ocf resource standard
     if "ocf:" in resource:
         resource_split = resource.split(":",3)
-        providers = [resource_split[1]]
+        provider = resource_split[1]
         resource = resource_split[2]
-    else:
-        providers = sorted(os.listdir("/usr/lib/ocf/resource.d"))
+        standard = "ocf"
+        metadata = utils.get_metadata("/usr/lib/ocf/resource.d/" + provider + "/" + resource)
+        if metadata:
+            resource_parse_options(metadata, standard, provider, resource)
+        else:
+            utils.err ("Unable to find resource: %s" % resource)
+        return
+
+    # no standard was given, let's search all ocf providers first
+    providers = sorted(os.listdir("/usr/lib/ocf/resource.d"))
     for provider in providers:
         metadata = utils.get_metadata("/usr/lib/ocf/resource.d/" + provider + "/" + resource)
         if metadata == False:
             continue
         else:
+            resource_parse_options(metadata, "ocf", provider, resource)
             found_resource = True
-        
-        try:
-            short_desc = ""
-            long_desc = ""
-            dom = parseString(metadata)
-            long_descs = dom.documentElement.getElementsByTagName("longdesc")
-            for ld in long_descs:
-                if ld.parentNode.tagName == "resource-agent" and ld.firstChild:
-                    long_desc = ld.firstChild.data.strip()
-                    break
-
-            short_descs = dom.documentElement.getElementsByTagName("shortdesc")
-            for sd in short_descs:
-                if sd.parentNode.tagName == "resource-agent" and sd.firstChild:
-                    short_desc = sd.firstChild.data.strip()
-                    break
-            
-            title_1 = "ocf:%s:%s" % (provider, resource)
-            if short_desc:
-                title_1 += " - " + format_desc(len(title_1 + " - "), short_desc)
-            print title_1
-            print 
-            if long_desc:
-                print long_desc
-                print
-
-            params = dom.documentElement.getElementsByTagName("parameter")
-            if len(params) > 0:
-                print "Resource options:"
-            for param in params:
-                name = param.getAttribute("name")
-                if param.getAttribute("required") == "1":
-                    name += " (required)"
-                desc = ""
-                longdesc_els = param.getElementsByTagName("longdesc")
-                if longdesc_els and longdesc_els[0].firstChild:
-                    desc = longdesc_els[0].firstChild.nodeValue.strip().replace("\n", "")
-                if not desc:
-                    desc = "No description available"
-                indent = name.__len__() + 4
-                desc = format_desc(indent, desc)
-                print "  " + name + ": " + desc
-        except xml.parsers.expat.ExpatError as e:
-            utils.err("Unable to parse xml for '%s': %s" % (resource, e))
 
+    # still not found, now let's look at nagios plugins
     if not found_resource:
-        utils.err ("Unable to find resource: %s" % resource)
+        try:
+            with open("/usr/share/pacemaker/nagios/plugins-metadata/" + resource + ".xml",'r') as f:
+                resource_parse_options(f.read(), "nagios", None, resource)
+        except IOError as e:
+            utils.err ("Unable to find resource: %s" % resource)
 
 # Return the string formatted with a line length of 79 and indented
 def format_desc(indent, desc):
@@ -341,24 +432,19 @@ def format_desc(indent, desc):
 # ra_class, ra_type & ra_provider must all contain valid info
 def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_opts=[]):
     if "--wait" in utils.pcs_options:
-        if utils.usefile:
-            utils.err("Cannot use '-f' together with '--wait'")
+        wait_timeout = utils.validate_wait_get_timeout()
         if "--disabled" in utils.pcs_options:
             utils.err("Cannot use '--wait' together with '--disabled'")
-        if "target-role=Stopped" in meta_values:
-            utils.err("Cannot use '--wait' together with 'target-role=Stopped'")
-
-    wait = False
-    wait_timeout = None
-    if "--wait" in utils.pcs_options:
-        wait = True
-        if utils.pcs_options["--wait"] is not None:
-            wait_timeout = utils.pcs_options["--wait"]
-            if not wait_timeout.isdigit():
-                utils.err(
-                    "%s is not a valid number of seconds to wait"
-                    % wait_timeout
-                )
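+        # refuse --wait together with options that prevent the resource (or its
+        # clone instances) from ever starting, as there would be nothing to wait for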
+        do_not_run = ["target-role=stopped"]
+        if (
+            "--master" in utils.pcs_options or "--clone" in utils.pcs_options
+            or
+            clone_opts
+        ):
+            do_not_run.extend(["clone-max=0", "clone-node-max=0"])
+        for opt in meta_values + clone_opts:
+            if opt.lower() in do_not_run:
+                utils.err("Cannot use '--wait' together with '%s'" % opt)
 
     ra_id_valid, ra_id_error = utils.validate_xml_id(ra_id, 'resource name')
     if not ra_id_valid:
@@ -376,7 +462,7 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
     if not utils.is_valid_resource(ra_type) and not ("--force" in utils.pcs_options):
         utils.err ("Unable to create resource '%s', it is not installed on this system (use --force to override)" % ra_type)
 
-    if utils.does_exist('//resources/descendant::primitive[@id="'+ra_id+'"]'):
+    if utils.does_id_exist(dom, ra_id):
         utils.err("unable to create resource/fence device '%s', '%s' already exists on this system" % (ra_id,ra_id))
 
 
@@ -425,7 +511,7 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
                         while interval in action_intervals[op_action]:
                             interval += 1
                         op[key] = "interval=%s" % interval
-                        print (
+                        print(
                             ("Warning: changing a %s operation interval from %s"
                                 + " to %s to make the operation unique")
                             % (op_action, old_interval, interval)
@@ -449,16 +535,6 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
         ]
         meta_values.append("target-role=Stopped")
 
-    if wait and wait_timeout is None:
-        for op in op_values_all:
-            if op[0] == "start":
-                for op_setting in op[1:]:
-                    match = re.match("timeout=(.+)", op_setting)
-                    if match:
-                        wait_timeout = utils.get_timeout_seconds(match.group(1))
-        if wait_timeout is None:
-            wait_timeout = utils.get_default_op_timeout()
-
 # If it's a master all meta values go to the master
     master_meta_values = []
     if "--master" in utils.pcs_options:
@@ -470,7 +546,7 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
     primitive_values.insert(0,("id",ra_id))
     meta_attributes = convert_args_to_meta_attrs(meta_values, ra_id)
     if not "--force" in utils.pcs_options and utils.does_resource_have_options(ra_type):
-        params = convert_args_to_tuples(ra_values)
+        params = utils.convert_args_to_tuples(ra_values)
         bad_opts, missing_req_opts = utils.validInstanceAttributes(ra_id, params , get_full_ra_type(ra_type, True))
         if len(bad_opts) != 0:
             utils.err ("resource option(s): '%s', are not recognized for resource type: '%s' (use --force to override)" \
@@ -489,45 +565,50 @@ def resource_create(ra_id, ra_type, ra_values, op_values, meta_values=[], clone_
     for op in op_values_agent:
         dom = resource_operation_add(dom, ra_id, op, validate=False)
     for op in op_values:
-        dom = resource_operation_add(dom, ra_id, op, validate=True)
+        dom = resource_operation_add(
+            dom, ra_id, op, validate=True, validate_strict=False
+        )
 
-    expected_instances = 1
     if "--clone" in utils.pcs_options or len(clone_opts) > 0:
-        dom = resource_clone_create(dom, [ra_id] + clone_opts)
-        expected_instances = utils.count_expected_resource_instances(
-            utils.dom_get_clone(dom, ra_id + "-clone"),
-            len(utils.getNodesFromPacemaker())
-        )
+        dom, clone_id = resource_clone_create(dom, [ra_id] + clone_opts)
         if "--group" in utils.pcs_options:
-            print "Warning: --group ignored when creating a clone"
+            print("Warning: --group ignored when creating a clone")
         if "--master" in utils.pcs_options:
-            print "Warning: --master ignored when creating a clone"
+            print("Warning: --master ignored when creating a clone")
     elif "--master" in utils.pcs_options:
-        dom = resource_master_create(dom, [ra_id] + master_meta_values)
-        expected_instances = utils.count_expected_resource_instances(
-            utils.dom_get_master(dom, ra_id + "-master"),
-            len(utils.getNodesFromPacemaker())
+        dom, master_id = resource_master_create(
+            dom, [ra_id] + master_meta_values
         )
         if "--group" in utils.pcs_options:
-            print "Warning: --group ignored when creating a master"
+            print("Warning: --group ignored when creating a master")
     elif "--group" in utils.pcs_options:
         groupname = utils.pcs_options["--group"]
         dom = resource_group_add(dom, groupname, [ra_id])
 
     utils.replace_cib_configuration(dom)
 
-    if wait:
-        running, message = utils.is_resource_started(
-            ra_id, int(wait_timeout), count=expected_instances
-        )
-        if running:
-            print message
+    if "--wait" in utils.pcs_options:
+        args = ["crm_resource", "--wait"]
+        if wait_timeout:
+            args.extend(["--timeout=%s" % wait_timeout])
+        output, retval = utils.run(args)
+        running_on = utils.resource_running_on(ra_id)
+        if retval == 0 and running_on["is_running"]:
+            print(running_on["message"])
         else:
-            utils.err(
-                "unable to start: '%s', please check logs for failure "
-                    "information\n%s"
-                % (ra_id, message)
-            )
+            msg = []
+            if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                msg.append("waiting timeout")
+            else:
+                msg.append(
+                    "unable to start: '%s', please check logs for failure "
+                    "information"
+                    % ra_id
+                )
+            msg.append(running_on["message"])
+            if retval != 0 and output:
+                msg.append("\n" + output)
+            utils.err("\n".join(msg).strip())
 
 def resource_move(argv,clear=False,ban=False):
     other_options = []
@@ -618,57 +699,27 @@ def resource_move(argv,clear=False,ban=False):
         else:
             utils.err("when specifying --master you must use the master id")
 
-    wait = False
-    if "--wait" in utils.pcs_options and not clear:
-        if utils.usefile:
-            utils.err("Cannot use '-f' together with '--wait'")
-        if not utils.is_resource_started(resource_id, 0)[0]:
-            print "Warning: Cannot use '--wait' on non-running resources"
-        else:
-            wait = True
-    if wait:
-        timeout = utils.pcs_options["--wait"]
-        if timeout is None:
-            timeout = (
-                utils.get_resource_op_timeout(dom, resource_id, "stop")
-                +
-                utils.get_resource_op_timeout(dom, resource_id, "start")
-            )
-        elif not timeout.isdigit():
-            utils.err("You must specify the number of seconds to wait")
-        allowed_nodes = set()
-        banned_nodes = set()
-        if dest_node and ban:
-            banned_nodes = set([dest_node])
-        elif dest_node:
-            allowed_nodes = set([dest_node])
-        else:
-            state = utils.getClusterState()
+    if "--wait" in utils.pcs_options:
+        wait_timeout = utils.validate_wait_get_timeout()
+        if not clear:
             running_on = utils.resource_running_on(resource_id)
-            banned_nodes = set(
-                running_on["nodes_master"] + running_on["nodes_started"]
-            )
-
-    if "--wait" in utils.pcs_options and clear:
-        if utils.usefile:
-            utils.err("Cannot use '-f' together with '--wait'")
-        wait = True
-        timeout = utils.pcs_options["--wait"]
-        if timeout and not timeout.isdigit():
-            utils.err("You must specify the number of seconds to wait")
-        try:
-            tmp_cib = tempfile.NamedTemporaryFile("w+b", -1, ".pcs")
-            tmp_cib.write(utils.get_cib_dom().toxml())
-            tmp_cib.seek(0)
-        except EnvironmentError as e:
-            utils.err("Unable to determine what to wait for:\n%s" % e)
-        utils.usefile = True
-        utils.filename = tmp_cib.name
+            was_running = running_on["is_running"]
+            allowed_nodes = set()
+            banned_nodes = set()
+            if dest_node and ban: # ban, node specified
+                banned_nodes = set([dest_node])
+            elif dest_node: # move, node specified
+                allowed_nodes = set([dest_node])
+            else: # move or ban, node not specified
+                banned_nodes = set(
+                    running_on["nodes_master"] + running_on["nodes_started"]
+                )
 
     if "--master" in utils.pcs_options:
         other_options.append("--master")
     if lifetime is not None:
         other_options.append("--lifetime=%s" % lifetime)
+
     if clear:
         if dest_node:
             output,ret = utils.run(["crm_resource", "--resource", resource_id, "--clear", "--host", dest_node] + other_options)
@@ -689,37 +740,64 @@ def resource_move(argv,clear=False,ban=False):
         if "Resource '"+resource_id+"' not moved: active in 0 locations." in output:
             utils.err("You must specify a node when moving/banning a stopped resource")
         utils.err ("error moving/banning/clearing resource\n" + output)
-
-    if wait and not clear:
-        success, message = utils.is_resource_started(
-            resource_id, int(timeout), allowed_nodes=allowed_nodes,
-            banned_nodes=banned_nodes
+    else:
+        warning_re = re.compile(
+            r"WARNING: Creating rsc_location constraint '([^']+)' "
+            + r"with a score of -INFINITY for resource ([\S]+) on (.+)."
         )
-        if success:
-            print message
-        else:
-            utils.err("Unable to start '%s'\n%s" % (resource_id, message))
+        for line in output.split("\n"):
+            warning_match = warning_re.search(line)
+            if warning_match:
+                warning_constraint = warning_match.group(1)
+                warning_resource = warning_match.group(2)
+                warning_node = warning_match.group(3)
+                warning_action = "running"
+                if "--master" in utils.pcs_options:
+                    warning_action = "being promoted"
+                print(("Warning: Creating location constraint {0} with a score "
+                    + "of -INFINITY for resource {1} on node {2}.").format(
+                        warning_constraint, warning_resource, warning_node
+                    ))
+                print(("This will prevent {0} from {1} on {2} until the "
+                    + "constraint is removed. This will be the case even if {3}"
+                    + " is the last node in the cluster.").format(
+                        warning_resource, warning_action, warning_node,
+                        warning_node
+                    ))
 
-    if wait and clear:
-        utils.usefile = False
-        utils.filename = ""
-        try:
-            tmp_cib.seek(0)
-            tmp_cib_dom = parseString(tmp_cib.read())
-        except (EnvironmentError, xml.parsers.expat.ExpatError) as e:
-            utils.err("Unable to determine what to wait for:\n%s" % e)
-        except xml.etree.ElementTree.ParseError as e:
-            utils.err("Unable to determine what to wait for:\n%s" % e)
-        output, transitions_dom, new_cib_dom = utils.simulate_cib(tmp_cib_dom)
-        op_list = utils.get_operations_from_transitions(transitions_dom)
-        my_op_list = [op for op in op_list if op[0] == resource_id]
-
-        utils.replace_cib_configuration(tmp_cib_dom)
-
-        if my_op_list:
-            utils.wait_for_primitive_ops_to_process(my_op_list, timeout)
+    if "--wait" in utils.pcs_options:
+        args = ["crm_resource", "--wait"]
+        if wait_timeout:
+            args.extend(["--timeout=%s" % wait_timeout])
+        output, retval = utils.run(args)
+        running_on = utils.resource_running_on(resource_id)
+        running_nodes = running_on["nodes_started"] + running_on["nodes_master"]
+        error = retval != 0
+        if ban and (
+            not banned_nodes.isdisjoint(running_nodes)
+            or
+            (was_running and not running_nodes)
+        ):
+            error = True
+        if (
+            not ban and not clear and was_running # running resource moved
+            and (
+                not running_nodes
+                or
+                (allowed_nodes and allowed_nodes.isdisjoint(running_nodes))
+            )
+        ):
+            error = True
+        if not error:
+            print(running_on["message"])
         else:
-            print utils.resource_running_on(resource_id)["message"]
+            msg = []
+            if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                msg.append("waiting timeout")
+            msg.append(running_on["message"])
+            if retval != 0 and output:
+                msg.append("\n" + output)
+            utils.err("\n".join(msg).strip())
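
Throughout this function and the ones below, the old per-resource polling is
replaced by a single call to "crm_resource --wait", which blocks until the
cluster has no pending actions.  A minimal sketch of that recurring pattern
(the helper name is hypothetical), relying on the utils.run/utils.err helpers
from pcs/utils.py and on the module-level PACEMAKER_WAIT_TIMEOUT_STATUS
constant, assumed to be the exit code crm_resource returns when the wait times
out; a sketch, not a definitive implementation:

    def _wait_for_cluster_idle(wait_timeout=None):
        # Ask pacemaker to block until all queued operations have settled.
        args = ["crm_resource", "--wait"]
        if wait_timeout:
            args.append("--timeout=%s" % wait_timeout)
        output, retval = utils.run(args)
        if retval == 0:
            return
        msg = []
        if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
            msg.append("waiting timeout")
        if output:
            msg.append("\n" + output)
        utils.err("\n".join(msg).strip())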
 
 def resource_standards(return_output=False):
     output, retval = utils.run(["crm_resource","--list-standards"], True)
@@ -728,13 +806,13 @@ def resource_standards(return_output=False):
     output = output.strip()
     if return_output == True:
         return output
-    print output
+    print(output)
 
 def resource_providers():
     output, retval = utils.run(["crm_resource","--list-ocf-providers"],True)
     # Return value is ignored because it contains the number of providers
     # returned, not an error code
-    print output.strip()
+    print(output.strip())
 
 def resource_agents(argv):
     if len(argv) > 1:
@@ -751,7 +829,7 @@ def resource_agents(argv):
         preg = re.compile(r'\d+ agents found for standard.*$', re.MULTILINE)
         output = preg.sub("", output)
         output = output.strip()
-        print output
+        print(output)
 
 # Update a resource, removing any args that are empty and adding/updating
 # args that are not empty
@@ -762,9 +840,9 @@ def resource_update(res_id,args):
     ra_values, op_values, meta_values = parse_resource_options(args)
 
     wait = False
+    wait_timeout = None
     if "--wait" in utils.pcs_options:
-        if utils.usefile:
-            utils.err("Cannot use '-f' together with '--wait'")
+        wait_timeout = utils.validate_wait_get_timeout()
         wait = True
 
     resource = None
@@ -784,7 +862,8 @@ def resource_update(res_id,args):
             for a in c.childNodes:
                 if a.localName == "primitive" or a.localName == "group":
                     return resource_update_clone_master(
-                        dom, clone, "clone", a.getAttribute("id"), args, wait
+                        dom, clone, "clone", a.getAttribute("id"), args,
+                        wait, wait_timeout
                     )
 
         master = None
@@ -795,17 +874,11 @@ def resource_update(res_id,args):
 
         if master:
             return resource_update_clone_master(
-                dom, master, "master", res_id, args, wait
+                dom, master, "master", res_id, args, wait, wait_timeout
             )
 
         utils.err ("Unable to find resource: %s" % res_id)
 
-    if wait:
-        node_count = len(utils.getNodesFromPacemaker())
-        status_old = utils.get_resource_status_for_wait(
-            dom, resource, node_count
-        )
-
     instance_attributes = resource.getElementsByTagName("instance_attributes")
     if len(instance_attributes) == 0:
         instance_attributes = dom.createElement("instance_attributes")
@@ -814,7 +887,7 @@ def resource_update(res_id,args):
     else:
         instance_attributes = instance_attributes[0]
     
-    params = convert_args_to_tuples(ra_values)
+    params = utils.convert_args_to_tuples(ra_values)
     if not "--force" in utils.pcs_options and (resource.getAttribute("class") == "ocf" or resource.getAttribute("class") == "stonith"):
         resClass = resource.getAttribute("class")
         resProvider = resource.getAttribute("provider")
@@ -854,7 +927,7 @@ def resource_update(res_id,args):
     else:
         meta_attributes = meta_attributes[0]
     
-    meta_attrs = convert_args_to_tuples(meta_values)
+    meta_attrs = utils.convert_args_to_tuples(meta_values)
     for (key,val) in meta_attrs:
         meta_found = False
         for ma in meta_attributes.getElementsByTagName("nvpair"):
@@ -891,7 +964,7 @@ def resource_update(res_id,args):
             continue
 
         op_role = ""
-        op_vars = convert_args_to_tuples(element[1:])
+        op_vars = utils.convert_args_to_tuples(element[1:])
 
         for k,v in op_vars:
             if k == "role":
@@ -913,92 +986,64 @@ def resource_update(res_id,args):
         if updating_op:
             updating_op.parentNode.removeChild(updating_op)
         dom = resource_operation_add(
-            dom, res_id, element, before_op=updating_op_before
+            dom, res_id, element, validate_strict=False,
+            before_op=updating_op_before
         )
 
     if len(instance_attributes.getElementsByTagName("nvpair")) == 0:
         instance_attributes.parentNode.removeChild(instance_attributes)
 
-    if wait:
-        status_new = utils.get_resource_status_for_wait(
-            dom, resource, node_count
-        )
-        wait_for_start, wait_for_stop = utils.get_resource_wait_decision(
-            status_old, status_new
-        )
-        if wait_for_start or wait_for_stop:
-            timeout = utils.pcs_options["--wait"]
-            if timeout is None:
-                timeout = utils.get_resource_op_timeout(
-                    dom, res_id, "start" if wait_for_start else "stop"
-                )
-            elif not timeout.isdigit():
-                utils.err("You must specify the number of seconds to wait")
-        else:
-            timeout = 0
-
     utils.replace_cib_configuration(dom)
 
-    if wait:
-        if wait_for_start or wait_for_stop:
-            success, message = utils.is_resource_started(
-                res_id, int(timeout), wait_for_stop,
-                count=status_new["instances"]
-            )
-            if success:
-                print message
-            else:
-                utils.err("Unable to start '%s'\n%s" % (res_id, message))
+    if "--wait" in utils.pcs_options:
+        args = ["crm_resource", "--wait"]
+        if wait_timeout:
+            args.extend(["--timeout=%s" % wait_timeout])
+        output, retval = utils.run(args)
+        running_on = utils.resource_running_on(res_id)
+        if retval == 0:
+            print(running_on["message"])
         else:
-            print utils.resource_running_on(res_id)["message"]
-
-def resource_update_clone_master(dom, clone, clone_type, res_id, args, wait):
-    if wait:
-        node_count = len(utils.getNodesFromPacemaker())
-        status_old = utils.get_resource_status_for_wait(dom, clone, node_count)
-
+            msg = []
+            if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                msg.append("waiting timeout")
+            msg.append(running_on["message"])
+            if retval != 0 and output:
+                msg.append("\n" + output)
+            utils.err("\n".join(msg).strip())
+
+def resource_update_clone_master(
+    dom, clone, clone_type, res_id, args, wait, wait_timeout
+):
     if clone_type == "clone":
-        dom = resource_clone_create(dom, [res_id] + args, True)
+        dom, clone_id = resource_clone_create(dom, [res_id] + args, True)
     elif clone_type == "master":
-        dom = resource_master_create(dom, [res_id] + args, True)
-
-    if wait:
-        status_new = utils.get_resource_status_for_wait(dom, clone, node_count)
-        wait_for_start, wait_for_stop = utils.get_resource_wait_decision(
-            status_old, status_new
-        )
-        if wait_for_start or wait_for_stop:
-            timeout = utils.pcs_options["--wait"]
-            if timeout is None:
-                timeout = utils.get_resource_op_timeout(
-                    dom, res_id, "start" if wait_for_start else "stop"
-                )
-            elif not timeout.isdigit():
-                utils.err("You must specify the number of seconds to wait")
-        else:
-            timeout = 0
+        dom, master_id = resource_master_create(dom, [res_id] + args, True)
 
     dom = utils.replace_cib_configuration(dom)
 
     if wait:
-        if wait_for_start or wait_for_stop:
-            success, message = utils.is_resource_started(
-                clone.getAttribute("id"), int(timeout), wait_for_stop,
-                count=status_new["instances"]
-            )
-            if success:
-                print message
-            else:
-                utils.err(
-                    "Unable to start '%s'\n%s"
-                    % (clone.getAttribute("id"), message)
-                )
+        args = ["crm_resource", "--wait"]
+        if wait_timeout:
+            args.extend(["--timeout=%s" % wait_timeout])
+        output, retval = utils.run(args)
+        running_on = utils.resource_running_on(clone.getAttribute("id"))
+        if retval == 0:
+            print(running_on["message"])
         else:
-            print utils.resource_running_on(clone.getAttribute("id"))["message"]
+            msg = []
+            if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                msg.append("waiting timeout")
+            msg.append(running_on["message"])
+            if retval != 0 and output:
+                msg.append("\n" + output)
+            utils.err("\n".join(msg).strip())
 
     return dom
 
-def resource_operation_add(dom, res_id, argv, validate=True, before_op=None):
+def resource_operation_add(
+    dom, res_id, argv, validate=True, validate_strict=True, before_op=None
+):
     if len(argv) < 1:
         usage.resource(["op"])
         sys.exit(1)
@@ -1008,7 +1053,7 @@ def resource_operation_add(dom, res_id, argv, validate=True, before_op=None):
         utils.err ("Unable to find resource: %s" % res_id)
 
     op_name = argv.pop(0)
-    op_properties = convert_args_to_tuples(argv)
+    op_properties = utils.convert_args_to_tuples(argv)
 
     if validate:
         if "=" in op_name:
@@ -1073,8 +1118,8 @@ def resource_operation_add(dom, res_id, argv, validate=True, before_op=None):
     else:
         operations = operations[0]
         if validate:
-            duplicate_op = utils.operation_exists(operations, op_el)
-            if duplicate_op:
+            duplicate_op_list = utils.operation_exists(operations, op_el)
+            if duplicate_op_list:
                 utils.err(
                     "operation %s with interval %ss already specified for %s:\n%s"
                     % (
@@ -1083,9 +1128,25 @@ def resource_operation_add(dom, res_id, argv, validate=True, before_op=None):
                             op_el.getAttribute("interval"), True
                         ),
                         res_id,
-                        operation_to_string(duplicate_op)
+                        "\n".join([
+                            operation_to_string(op) for op in duplicate_op_list
+                        ])
                     )
                 )
+            if validate_strict and "--force" not in utils.pcs_options:
+                duplicate_op_list = utils.operation_exists_by_name(
+                    operations, op_el
+                )
+                if duplicate_op_list:
+                    msg = ("operation {action} already specified for {res}"
+                        + ", use --force to override:\n{op}")
+                    utils.err(msg.format(
+                        action=op_el.getAttribute("name"),
+                        res=res_id,
+                        op="\n".join([
+                            operation_to_string(op) for op in duplicate_op_list
+                        ])
+                    ))
 
     operations.insertBefore(op_el, before_op)
     return dom
@@ -1121,7 +1182,7 @@ def resource_operation_remove(res_id, argv):
     if len(argv) == 0:
         remove_all = True
 
-    op_properties = convert_args_to_tuples(argv)
+    op_properties = utils.convert_args_to_tuples(argv)
     op_properties.append(('name', op_name))
     found_match = False
     for op in resource.getElementsByTagName("op"):
@@ -1152,102 +1213,41 @@ def resource_operation_remove(res_id, argv):
 
 def resource_meta(res_id, argv):
     dom = utils.get_cib_dom()
-    allowed_elements = ["primitive","group","clone","master"]
-    elems = []
-    element_found = False
-    for ae in allowed_elements:
-        elems = elems + dom.getElementsByTagName(ae)
-    for elem in elems:
-        if elem.getAttribute("id") == res_id:
-            element_found = True
-            break
+    resource_el = utils.dom_get_any_resource(dom, res_id)
 
-    if not element_found:
+    if resource_el is None:
         utils.err("unable to find a resource/clone/master/group: %s" % res_id)
 
-    # Make sure we only check direct children for meta_attributes
-    meta_attributes = []
-    for child in elem.childNodes:
-        if child.nodeType == child.ELEMENT_NODE and child.tagName == "meta_attributes":
-            meta_attributes.append(child)
-
-    if len(meta_attributes) == 0:
-        meta_attributes = dom.createElement("meta_attributes")
-        meta_attributes.setAttribute("id", res_id + "-meta_attributes")
-        elem.appendChild(meta_attributes)
-    else:
-        meta_attributes = meta_attributes[0]
-
-    wait = False
     if "--wait" in utils.pcs_options:
-        if utils.usefile:
-            utils.err("Cannot use '-f' together with '--wait'")
-        wait = True
-        node_count = len(utils.getNodesFromPacemaker())
-        status_old = utils.get_resource_status_for_wait(dom, elem, node_count)
+        wait_timeout = utils.validate_wait_get_timeout()
 
-    update_meta_attributes(
-        meta_attributes,
-        convert_args_to_tuples(argv),
-        res_id + "-meta_attributes-"
-    )
-
-    if wait:
-        status_new = utils.get_resource_status_for_wait(dom, elem, node_count)
-        wait_for_start, wait_for_stop = utils.get_resource_wait_decision(
-            status_old, status_new
-        )
-        if wait_for_start or wait_for_stop:
-            timeout = utils.pcs_options["--wait"]
-            if timeout is None:
-                timeout = utils.get_resource_op_timeout(
-                    dom, res_id, "start" if wait_for_start else "stop"
-                )
-            elif not timeout.isdigit():
-                utils.err("You must specify the number of seconds to wait")
-        else:
-            timeout = 0
+    utils.dom_update_meta_attr(resource_el, utils.convert_args_to_tuples(argv))
 
     utils.replace_cib_configuration(dom)
 
-    if wait:
-        if wait_for_start or wait_for_stop:
-            success, message = utils.is_resource_started(
-                res_id, int(timeout), wait_for_stop, count=status_new["instances"]
-            )
-            if success:
-                print message
-            else:
-                utils.err("Unable to start '%s'\n%s" % (res_id, message))
+    if "--wait" in utils.pcs_options:
+        args = ["crm_resource", "--wait"]
+        if wait_timeout:
+            args.extend(["--timeout=%s" % wait_timeout])
+        output, retval = utils.run(args)
+        running_on = utils.resource_running_on(res_id)
+        if retval == 0:
+            print(running_on["message"])
         else:
-            print utils.resource_running_on(res_id)["message"]
-
-def update_meta_attributes(meta_attributes, meta_attrs, id_prefix):
-    dom = meta_attributes.ownerDocument
-    for (key,val) in meta_attrs:
-        meta_found = False
-        for ma in meta_attributes.getElementsByTagName("nvpair"):
-            if ma.getAttribute("name") == key:
-                meta_found = True
-                if val == "":
-                    meta_attributes.removeChild(ma)
-                else:
-                    ma.setAttribute("value", val)
-                break
-        if not meta_found:
-            ma = dom.createElement("nvpair")
-            ma.setAttribute("id", id_prefix + key)
-            ma.setAttribute("name", key)
-            ma.setAttribute("value", val)
-            meta_attributes.appendChild(ma)
-    return meta_attributes
+            msg = []
+            if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                msg.append("waiting timeout")
+            msg.append(running_on["message"])
+            if retval != 0 and output:
+                msg.append("\n" + output)
+            utils.err("\n".join(msg).strip())
 
 def convert_args_to_meta_attrs(meta_attrs, ra_id):
     if len(meta_attrs) == 0:
         return []
 
     meta_vars = []
-    tuples = convert_args_to_tuples(meta_attrs)
+    tuples = utils.convert_args_to_tuples(meta_attrs)
     attribute_id = ra_id + "-meta_attributes"
     for (a,b) in tuples:
         meta_vars.append(("nvpair",[("name",a),("value",b),("id",attribute_id+"-"+a)],[]))
@@ -1255,7 +1255,7 @@ def convert_args_to_meta_attrs(meta_attrs, ra_id):
     return [ret]
 
 def convert_args_to_instance_variables(ra_values, ra_id):
-    tuples = convert_args_to_tuples(ra_values)
+    tuples = utils.convert_args_to_tuples(ra_values)
     ivs = []
     attribute_id = ra_id + "-instance_attributes"
     for (a,b) in tuples:
@@ -1263,15 +1263,6 @@ def convert_args_to_instance_variables(ra_values, ra_id):
     ret = ("instance_attributes", [[("id"),(attribute_id)]], ivs)
     return [ret]
 
-# Passed an array of strings ["a=b","c=d"], return array of tuples
-# [("a","b"),("c","d")]
-def convert_args_to_tuples(ra_values):
-    ret = []
-    for ra_val in ra_values:
-        if ra_val.count("=") != 0:
-            split_val = ra_val.split("=", 1)
-            ret.append((split_val[0],split_val[1]))
-    return ret
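
The removed helper now lives in pcs/utils.py and is called as
utils.convert_args_to_tuples throughout this file.  A minimal usage sketch of
the behaviour described above, i.e. arguments without "=" are silently
skipped (the parameter values are illustrative only):

    params = utils.convert_args_to_tuples(["ip=192.168.0.99", "cidr_netmask=24", "nic"])
    # params == [("ip", "192.168.0.99"), ("cidr_netmask", "24")]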
 
 # Passed a resource type (ex. ocf:heartbeat:IPaddr2 or IPaddr2) and returns
 # a list of tuples mapping the types to xml attributes
@@ -1281,6 +1272,8 @@ def get_full_ra_type(ra_type, return_string = False):
             ra_type = "ocf:heartbeat:" + ra_type
         elif os.path.isfile("/usr/lib/ocf/resource.d/pacemaker/%s" % ra_type):
             ra_type = "ocf:pacemaker:" + ra_type
+        elif os.path.isfile("/usr/share/pacemaker/nagios/plugins-metadata/%s.xml" % ra_type):
+            ra_type = "nagios:" + ra_type
         else:
             ra_type = "ocf:heartbeat:" + ra_type
 
@@ -1323,24 +1316,26 @@ def resource_group(argv):
         resource_ids = argv
         cib = resource_group_add(utils.get_cib_dom(), group_name, resource_ids)
 
-        wait = False
         if "--wait" in utils.pcs_options:
-            if utils.usefile:
-                utils.err("Cannot use '-f' together with '--wait'")
-            wait = True
-            timeout = utils.pcs_options["--wait"]
-            if timeout and not timeout.isdigit():
-                utils.err("You must specify the number of seconds to wait")
-            output, transitions_dom, new_cib_dom = utils.simulate_cib(cib)
-            op_list = utils.get_operations_from_transitions(transitions_dom)
-            my_op_list = [op for op in op_list if op[0] in resource_ids]
+            wait_timeout = utils.validate_wait_get_timeout()
 
         utils.replace_cib_configuration(cib)
 
-        if wait:
-            if my_op_list:
-                utils.wait_for_primitive_ops_to_process(my_op_list, timeout)
-            print utils.resource_running_on(group_name)["message"]
+        if "--wait" in utils.pcs_options:
+            args = ["crm_resource", "--wait"]
+            if wait_timeout:
+                args.extend(["--timeout=%s" % wait_timeout])
+            output, retval = utils.run(args)
+            running_on = utils.resource_running_on(group_name)
+            if retval == 0:
+                print(running_on["message"])
+            else:
+                msg = []
+                if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                    msg.append("waiting timeout")
+                if output:
+                    msg.append("\n" + output)
+                utils.err("\n".join(msg).strip())
 
     elif (group_cmd == "list"):
         resource_group_list(argv)
@@ -1351,27 +1346,28 @@ def resource_group(argv):
         group_name = argv.pop(0)
         resource_ids = argv
 
-        cib_dom, removed_resources = resource_group_rm(
+        cib_dom = resource_group_rm(
             utils.get_cib_dom(), group_name, resource_ids
         )
 
-        wait = False
         if "--wait" in utils.pcs_options:
-            if utils.usefile:
-                utils.err("Cannot use '-f' together with '--wait'")
-            wait = True
-            timeout = utils.pcs_options["--wait"]
-            if timeout and not timeout.isdigit():
-                utils.err("You must specify the number of seconds to wait")
-            output, transitions_dom, new_cib_dom = utils.simulate_cib(cib_dom)
-            op_list = utils.get_operations_from_transitions(transitions_dom)
-            my_op_list = [op for op in op_list if op[0] in removed_resources]
+            wait_timeout = utils.validate_wait_get_timeout()
 
         utils.replace_cib_configuration(cib_dom)
 
-        if wait:
-            if my_op_list:
-                utils.wait_for_primitive_ops_to_process(my_op_list, timeout)
+        if "--wait" in utils.pcs_options:
+            args = ["crm_resource", "--wait"]
+            if wait_timeout:
+                args.extend(["--timeout=%s" % wait_timeout])
+            output, retval = utils.run(args)
+            if retval != 0:
+                msg = []
+                if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                    msg.append("waiting timeout")
+                if output:
+                    msg.append("\n" + output)
+                utils.err("\n".join(msg).strip())
+
     else:
         usage.resource()
         sys.exit(1)
@@ -1384,44 +1380,29 @@ def resource_clone(argv):
     res = argv[0]
     cib_dom = utils.get_cib_dom()
 
-    wait = False
     if "--wait" in utils.pcs_options:
-        if utils.usefile:
-            utils.err("Cannot use '-f' together with '--wait'")
-        if not utils.is_resource_started(res, 0)[0]:
-            print "Warning: Cannot use '--wait' on non-running resources"
-        else:
-            wait = True
-    if wait:
-        wait_op = "start"
-        for arg in argv:
-            if arg.lower() == "target-role=stopped":
-                wait_op = "stop"
-        timeout = utils.pcs_options["--wait"]
-        if timeout is None:
-            timeout = utils.get_resource_op_timeout(cib_dom, res, wait_op)
-        elif not timeout.isdigit():
-            utils.err("You must specify the number of seconds to wait")
+        wait_timeout = utils.validate_wait_get_timeout()
 
-    cib_dom = resource_clone_create(cib_dom, argv)
+    cib_dom, clone_id = resource_clone_create(cib_dom, argv)
     cib_dom = constraint.constraint_resource_update(res, cib_dom)
     utils.replace_cib_configuration(cib_dom)
 
-    if wait:
-        count = utils.count_expected_resource_instances(
-            utils.dom_get_clone(cib_dom, res + "-clone"),
-            len(utils.getNodesFromPacemaker())
-        )
-        success, message = utils.is_resource_started(
-            res, int(timeout), wait_op == "stop", count=count
-        )
-        if success:
-            print message
+    if "--wait" in utils.pcs_options:
+        args = ["crm_resource", "--wait"]
+        if wait_timeout:
+            args.extend(["--timeout=%s" % wait_timeout])
+        output, retval = utils.run(args)
+        running_on = utils.resource_running_on(clone_id)
+        if retval == 0:
+            print(running_on["message"])
         else:
-            utils.err(
-                "Unable to %s clones of '%s'\n%s"
-                % (wait_op, res, message)
-            )
+            msg = []
+            if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                msg.append("waiting timeout")
+            msg.append(running_on["message"])
+            if output:
+                msg.append("\n" + output)
+            utils.err("\n".join(msg).strip())
 
 def resource_clone_create(cib_dom, argv, update_existing=False):
     name = argv.pop(0)
@@ -1445,7 +1426,6 @@ def resource_clone_create(cib_dom, argv, update_existing=False):
     if element.parentNode.tagName == "group" and element.parentNode.getElementsByTagName("primitive").length <= 1:
         element.parentNode.parentNode.removeChild(element.parentNode)
 
-    meta = None
     if update_existing:
         if element.parentNode.tagName != "clone":
             utils.err("%s is not currently a clone" % name)
@@ -1460,17 +1440,13 @@ def resource_clone_create(cib_dom, argv, update_existing=False):
                 break
     else:
         clone = cib_dom.createElement("clone")
-        clone.setAttribute("id",name + "-clone")
+        clone.setAttribute("id", utils.find_unique_id(cib_dom, name + "-clone"))
         clone.appendChild(element)
         re.appendChild(clone)
-    if meta is None:
-        meta = cib_dom.createElement("meta_attributes")
-        meta.setAttribute("id",name + "-clone-meta")
-        clone.appendChild(meta)
 
-    update_meta_attributes(meta, convert_args_to_tuples(argv), name + "-")
+    utils.dom_update_meta_attr(clone, utils.convert_args_to_tuples(argv))
 
-    return cib_dom
+    return cib_dom, clone.getAttribute("id")
 
 def resource_clone_master_remove(argv):
     if len(argv) != 1:
@@ -1481,7 +1457,7 @@ def resource_clone_master_remove(argv):
     dom = utils.get_cib_dom()
     re = dom.documentElement.getElementsByTagName("resources")[0]
 
-    found = False
+    # get the resource no matter whether the user entered a clone or a cloned resource
     resource = (
         utils.dom_get_resource(re, name)
         or
@@ -1491,43 +1467,48 @@ def resource_clone_master_remove(argv):
     )
     if not resource:
         utils.err("could not find resource: %s" % name)
-    clone = resource.parentNode
     resource_id = resource.getAttribute("id")
-    clone_id = clone.getAttribute("id")
+    clone = utils.dom_get_resource_clone_ms_parent(re, resource_id)
+    if not clone:
+        utils.err("'%s' is not a clone resource" % name)
 
-    wait = False
     if "--wait" in utils.pcs_options:
-        if utils.usefile:
-            utils.err("Cannot use '-f' together with '--wait'")
-        if not utils.is_resource_started(resource_id, 0)[0]:
-            print "Warning: Cannot use '--wait' on non-running resources"
-        else:
-            wait = True
-    if wait:
-        timeout = utils.pcs_options["--wait"]
-        if timeout is None:
-            timeout = utils.get_resource_op_timeout(dom, resource_id, "stop")
-        elif not timeout.isdigit():
-            utils.err("You must specify the number of seconds to wait")
+        wait_timeout = utils.validate_wait_get_timeout()
 
-    constraint.remove_constraints_containing(
-        clone.getAttribute("id"), passed_dom=dom
-    )
-    clone.parentNode.appendChild(resource)
-    clone.parentNode.removeChild(clone)
+    # if user requested uncloning a resource contained in a cloned group
+    # remove the resource from the group and leave the clone itself alone
+    # unless the resource is the last one in the group
+    clone_child = utils.dom_get_clone_ms_resource(re, clone.getAttribute("id"))
+    if (
+        clone_child.tagName == "group"
+        and
+        resource.tagName != "group"
+        and
+        len(clone_child.getElementsByTagName("primitive")) > 1
+    ):
+        resource_group_rm(dom, clone_child.getAttribute("id"), [resource_id])
+    else:
+        remove_resource_references(dom, clone.getAttribute("id"))
+        clone.parentNode.appendChild(resource)
+        clone.parentNode.removeChild(clone)
     utils.replace_cib_configuration(dom)
 
-    if wait:
-        running, message = utils.is_resource_started(
-            resource_id, int(timeout), count=1
-        )
-        if running:
-            print message
+    if "--wait" in utils.pcs_options:
+        args = ["crm_resource", "--wait"]
+        if wait_timeout:
+            args.extend(["--timeout=%s" % wait_timeout])
+        output, retval = utils.run(args)
+        running_on = utils.resource_running_on(resource_id)
+        if retval == 0:
+            print(running_on["message"])
         else:
-            utils.err(
-                "Unable to start single instance of '%s'\n%s"
-                % (resource_id, message)
-            )
+            msg = []
+            if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                msg.append("waiting timeout")
+            msg.append(running_on["message"])
+            if output:
+                msg.append("\n" + output)
+            utils.err("\n".join(msg).strip())
 
 def resource_master(argv):
     non_option_args_count = 0
@@ -1539,53 +1520,43 @@ def resource_master(argv):
         sys.exit(1)
     if non_option_args_count == 1:
         res_id = argv[0]
-        master_id = res_id + "-master"
+        master_id = None
     else:
         master_id = argv.pop(0)
         res_id = argv[0]
     cib_dom = utils.get_cib_dom()
 
-    wait = False
     if "--wait" in utils.pcs_options:
-        if utils.usefile:
-            utils.err("Cannot use '-f' together with '--wait'")
-        if not utils.is_resource_started(res_id, 0)[0]:
-            print "Warning: Cannot use '--wait' on non-running resources"
-        else:
-            wait = True
-    if wait:
-        wait_op = "promote"
-        for arg in argv:
-            if arg.lower() == "target-role=stopped":
-                wait_op = "stop"
-        timeout = utils.pcs_options["--wait"]
-        if timeout is None:
-            timeout = utils.get_resource_op_timeout(cib_dom, res_id, wait_op)
-        elif not timeout.isdigit():
-            utils.err("You must specify the number of seconds to wait")
+        wait_timeout = utils.validate_wait_get_timeout()
 
-    cib_dom = resource_master_create(cib_dom, argv, False, master_id)
+    cib_dom, master_id = resource_master_create(cib_dom, argv, False, master_id)
     cib_dom = constraint.constraint_resource_update(res_id, cib_dom)
     utils.replace_cib_configuration(cib_dom)
 
-    if wait:
-        count = utils.count_expected_resource_instances(
-            utils.dom_get_master(cib_dom, master_id),
-            len(utils.getNodesFromPacemaker())
-        )
-        success, message = utils.is_resource_started(
-            res_id, int(timeout), wait_op == "stop", count=count
-        )
-        if success:
-            print message
+    if "--wait" in utils.pcs_options:
+        args = ["crm_resource", "--wait"]
+        if wait_timeout:
+            args.extend(["--timeout=%s" % wait_timeout])
+        output, retval = utils.run(args)
+        running_on = utils.resource_running_on(master_id)
+        if retval == 0:
+            print(running_on["message"])
         else:
-            utils.err("unable to %s '%s'\n%s" % (wait_op, res_id, message))
+            msg = []
+            if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                msg.append("waiting timeout")
+            msg.append(running_on["message"])
+            if output:
+                msg.append("\n" + output)
+            utils.err("\n".join(msg).strip())
 
 def resource_master_create(dom, argv, update=False, master_id=None):
+    master_id_autogenerated = False
     if update:
         master_id = argv.pop(0)
     elif not master_id:
         master_id = argv[0] + "-master"
+        master_id_autogenerated = True
 
     if (update):
         master_found = False
@@ -1598,13 +1569,13 @@ def resource_master_create(dom, argv, update=False, master_id=None):
             utils.err("Unable to find multi-state resource with id %s" % master_id)
     else:
         rg_id = argv.pop(0)
-        if utils.does_id_exist(dom, master_id):
+        if not master_id_autogenerated and utils.does_id_exist(dom, master_id):
             utils.err("%s already exists in the cib" % master_id)
 
-        if utils.is_resource_clone(rg_id):
+        if utils.dom_get_resource_clone(dom, rg_id):
             utils.err("%s is already a clone resource" % rg_id)
 
-        if utils.is_resource_masterslave(rg_id):
+        if utils.dom_get_resource_masterslave(dom, rg_id):
             utils.err("%s is already a master/slave resource" % rg_id)
 
         resources = dom.getElementsByTagName("resources")[0]
@@ -1622,31 +1593,22 @@ def resource_master_create(dom, argv, update=False, master_id=None):
             resource.parentNode.parentNode.removeChild(resource.parentNode)
         
         master_element = dom.createElement("master")
-        master_element.setAttribute("id", master_id)
+        if master_id_autogenerated:
+            master_element.setAttribute(
+                "id", utils.find_unique_id(dom, master_id)
+            )
+        else:
+            master_element.setAttribute("id", master_id)
         resource.parentNode.removeChild(resource)
         master_element.appendChild(resource)
         resources.appendChild(master_element)
 
     if len(argv) > 0:
-        meta = None
-        for child in master_element.childNodes:
-            if child.nodeType != xml.dom.Node.ELEMENT_NODE:
-                continue
-            if child.tagName == "meta_attributes":
-                meta = child
-        if meta == None:
-            meta = dom.createElement("meta_attributes")
-            meta.setAttribute("id", master_id + "-meta_attributes")
-            master_element.appendChild(meta)
-
-        update_meta_attributes(
-            meta,
-            convert_args_to_tuples(argv),
-            meta.getAttribute("id") + "-"
+        utils.dom_update_meta_attr(
+            master_element,
+            utils.convert_args_to_tuples(argv)
         )
-        if len(meta.getElementsByTagName("nvpair")) == 0:
-            master_element.removeChild(meta)
-    return dom
+    return dom, master_element.getAttribute("id")
 
 def resource_master_remove(argv):
     if len(argv) < 1:
@@ -1679,9 +1641,11 @@ def resource_master_remove(argv):
         constraints_element = constraints_element[0]
         constraints = []
         for resource_id in resources_to_cleanup:
-            constraint.remove_constraints_containing(resource_id, constraints_element)
+            remove_resource_references(
+                dom, resource_id, constraints_element=constraints_element
+            )
     master.parentNode.removeChild(master)
-    print "Removing Master - " + master_id
+    print("Removing Master - " + master_id)
     utils.replace_cib_configuration(dom)
 
 def resource_remove(resource_id, output = True):
@@ -1691,15 +1655,44 @@ def resource_remove(resource_id, output = True):
         resource_id = cloned_resource.getAttribute("id")
 
     if utils.does_exist('//group[@id="'+resource_id+'"]'):
-        print "Removing group: " + resource_id + " (and all resources within group)"
+        print("Removing group: " + resource_id + " (and all resources within group)")
         group = utils.get_cib_xpath('//group[@id="'+resource_id+'"]')
         group_dom = parseString(group)
-        print "Stopping all resources in group: %s..." % resource_id
+        print("Stopping all resources in group: %s..." % resource_id)
         resource_disable([resource_id])
-        for res in group_dom.documentElement.getElementsByTagName("primitive"):
-            res_id = res.getAttribute("id")
-            if not "--force" in utils.pcs_options and not utils.usefile and not utils.is_resource_started(res_id, 15, True)[0]:
-                utils.err("Unable to stop group: %s before deleting (re-run with --force to force deletion)" % resource_id)
+        if not "--force" in utils.pcs_options and not utils.usefile:
+            output, retval = utils.run(["crm_resource", "--wait"])
+            if retval != 0 and "unrecognized option '--wait'" in output:
+                output = ""
+                retval = 0
+                for res in reversed(
+                    group_dom.documentElement.getElementsByTagName("primitive")
+                ):
+                    res_id = res.getAttribute("id")
+                    res_stopped = False
+                    for i in range(15):
+                        time.sleep(1)
+                        if not utils.resource_running_on(res_id)["is_running"]:
+                            res_stopped = True
+                            break
+                    if not res_stopped:
+                        break
+            stopped = True
+            state = utils.getClusterState()
+            for res in group_dom.documentElement.getElementsByTagName("primitive"):
+                res_id = res.getAttribute("id")
+                if utils.resource_running_on(res_id, state)["is_running"]:
+                    stopped = False
+                    break
+            if not stopped:
+                msg = [
+                    "Unable to stop group: %s before deleting "
+                    "(re-run with --force to force deletion)"
+                    % resource_id
+                ]
+                if retval != 0 and output:
+                    msg.append("\n" + output)
+                utils.err("\n".join(msg).strip())
         for res in group_dom.documentElement.getElementsByTagName("primitive"):
             resource_remove(res.getAttribute("id"))
         sys.exit(0)
@@ -1712,20 +1705,44 @@ def resource_remove(resource_id, output = True):
         if utils.does_exist('//resources/master[@id="'+resource_id+'"]'):
             return resource_master_remove([resource_id])
 
-        utils.err("Resource does not exist.")
+        utils.err("Resource '{0}' does not exist.".format(resource_id))
 
     if (group != ""):
         num_resources_in_group = len(parseString(group).documentElement.getElementsByTagName("primitive"))
 
-    if not "--force" in utils.pcs_options and not utils.usefile and not utils.is_resource_started(resource_id, 0, True)[0]:
+    if (
+        not "--force" in utils.pcs_options
+        and
+        not utils.usefile
+        and
+        utils.resource_running_on(resource_id)["is_running"]
+    ):
         sys.stdout.write("Attempting to stop: "+ resource_id + "...")
         sys.stdout.flush()
         resource_disable([resource_id])
-        if not utils.is_resource_started(resource_id, 15, True)[0]:
-            utils.err("Unable to stop: %s before deleting (re-run with --force to force deletion)" % resource_id)
-        print "Stopped"
-
-    constraint.remove_constraints_containing(resource_id,output)
+        output, retval = utils.run(["crm_resource", "--wait"])
+        if retval != 0 and "unrecognized option '--wait'" in output:
+            output = ""
+            retval = 0
+            for i in range(15):
+                time.sleep(1)
+                if not utils.resource_running_on(resource_id)["is_running"]:
+                    break
+        if utils.resource_running_on(resource_id)["is_running"]:
+            msg = [
+                "Unable to stop: %s before deleting "
+                "(re-run with --force to force deletion)"
+                % resource_id
+            ]
+            if retval != 0 and output:
+                msg.append("\n" + output)
+            utils.err("\n".join(msg).strip())
+        print("Stopped")
+
+    utils.replace_cib_configuration(
+        remove_resource_references(utils.get_cib_dom(), resource_id, output)
+    )
+    dom = utils.get_cib_dom()
     resource_el = utils.dom_get_resource(dom, resource_id)
     if resource_el:
         remote_node = utils.dom_get_resource_remote_node_name(resource_el)
@@ -1746,7 +1763,7 @@ def resource_remove(resource_id, output = True):
         else:
             args = ["cibadmin", "-o", "resources", "-D", "--xpath", "//primitive[@id='"+resource_id+"']"]
         if output == True:
-            print "Deleting Resource - " + resource_id
+            print("Deleting Resource - " + resource_id)
         output,retVal = utils.run(args)
         if retVal != 0:
             utils.err("unable to remove resource: %s, it may still be referenced in constraints." % resource_id)
@@ -1760,24 +1777,38 @@ def resource_remove(resource_id, output = True):
             msg = "and group and M/S"
             to_remove_dom = parseString(top_master).getElementsByTagName("master")
             to_remove_id = to_remove_dom[0].getAttribute("id")
-            constraint.remove_constraints_containing(to_remove_dom[0].getElementsByTagName("group")[0].getAttribute("id"))
+            utils.replace_cib_configuration(
+                remove_resource_references(
+                    utils.get_cib_dom(),
+                    to_remove_dom[0].getElementsByTagName("group")[0].getAttribute("id")
+                )
+            )
         elif top_clone != "":
             to_remove_xpath = top_clone_xpath
             msg = "and group and clone"
             to_remove_dom = parseString(top_clone).getElementsByTagName("clone")
             to_remove_id = to_remove_dom[0].getAttribute("id")
-            constraint.remove_constraints_containing(to_remove_dom[0].getElementsByTagName("group")[0].getAttribute("id"))
+            utils.replace_cib_configuration(
+                remove_resource_references(
+                    utils.get_cib_dom(),
+                    to_remove_dom[0].getElementsByTagName("group")[0].getAttribute("id")
+                )
+            )
         else:
             to_remove_xpath = group_xpath
             msg = "and group"
             to_remove_dom = parseString(group).getElementsByTagName("group")
             to_remove_id = to_remove_dom[0].getAttribute("id")
 
-        constraint.remove_constraints_containing(to_remove_id,output)
+        utils.replace_cib_configuration(
+            remove_resource_references(
+                utils.get_cib_dom(), to_remove_id, output
+            )
+        )
 
         args = ["cibadmin", "-o", "resources", "-D", "--xpath", to_remove_xpath]
         if output == True:
-            print "Deleting Resource ("+msg+") - " + resource_id
+            print("Deleting Resource ("+msg+") - " + resource_id)
         cmdoutput,retVal = utils.run(args)
         if retVal != 0:
             if output == True:
@@ -1785,20 +1816,22 @@ def resource_remove(resource_id, output = True):
             return False
     return True
 
+def remove_resource_references(
+    dom, resource_id, output=False, constraints_element=None
+):
+    constraint.remove_constraints_containing(
+        resource_id, output, constraints_element, dom
+    )
+    stonith.stonith_level_rm_device(dom, resource_id)
+    return dom
+
 # This removes a resource from a group, but keeps it in the config
 def resource_group_rm(cib_dom, group_name, resource_ids):
     dom = cib_dom.getElementsByTagName("configuration")[0]
-    group_match = None
-
-    all_resources = False
-    if len(resource_ids) == 0:
-        all_resources = True
 
-    for group in dom.getElementsByTagName("group"):
-        if group.getAttribute("id") == group_name:
-            group_match = group
-            break
+    all_resources = len(resource_ids) == 0
 
+    group_match = utils.dom_get_group(dom, group_name)
     if not group_match:
         utils.err("Group '%s' does not exist" % group_name)
 
@@ -1806,34 +1839,43 @@ def resource_group_rm(cib_dom, group_name, resource_ids):
         utils.err("Groups that have more than one resource and are master/slave resources cannot be removed.  The group may be deleted with 'pcs resource delete %s'." % group_name)
 
     resources_to_move = []
-    resources_to_move_id = []
 
     if all_resources:
         for resource in group_match.getElementsByTagName("primitive"):
             resources_to_move.append(resource)
     else:
         for resource_id in resource_ids:
-            found_resource = False
-            for resource in group_match.getElementsByTagName("primitive"):
-                if resource.getAttribute("id") == resource_id:
-                    found_resource = True
-                    resources_to_move.append(resource)
-                    break
-            if not found_resource:
+            resource = utils.dom_get_resource(group_match, resource_id)
+            if resource:
+                resources_to_move.append(resource)
+            else:
                 utils.err("Resource '%s' does not exist in group '%s'" % (resource_id, group_name))
 
+    if group_match.parentNode.tagName in ["clone", "master"]:
+        res_in_group = len(group_match.getElementsByTagName("primitive"))
+        if (
+            res_in_group > 1
+            and
+            (all_resources or (len(resources_to_move) == res_in_group))
+        ):
+            utils.err("Cannot remove more than one resource from cloned group")
+
+    target_node = group_match.parentNode
+    if (
+        target_node.tagName in ["clone", "master"]
+        and
+        len(group_match.getElementsByTagName("primitive")) > 1
+    ):
+        target_node = dom.getElementsByTagName("resources")[0]
     for resource in resources_to_move:
-        resources_to_move_id.append(resource.getAttribute("id"))
-        parent = resource.parentNode
         resource.parentNode.removeChild(resource)
-        parent.parentNode.appendChild(resource)
-
-    constraint.remove_constraints_containing(group_name, True, passed_dom=dom)
+        target_node.appendChild(resource)
 
     if len(group_match.getElementsByTagName("primitive")) == 0:
         group_match.parentNode.removeChild(group_match)
+        remove_resource_references(dom, group_name, output=True)
 
-    return cib_dom, resources_to_move_id
+    return cib_dom
 
 def resource_group_add(cib_dom, group_name, resource_ids):
     resources_element = cib_dom.getElementsByTagName("resources")[0]
@@ -1911,8 +1953,17 @@ def resource_group_add(cib_dom, group_name, resource_ids):
                 mygroup.insertBefore(resource, before)
             else:
                 mygroup.appendChild(resource)
-            if oldParent.tagName == "group" and len(oldParent.getElementsByTagName("primitive")) == 0:
-                oldParent.parentNode.removeChild(oldParent)
+            if (
+                oldParent.tagName == "group"
+                and
+                len(oldParent.getElementsByTagName("primitive")) == 0
+            ):
+                if oldParent.parentNode.tagName in ["clone", "master"]:
+                    oldParent.parentNode.parentNode.removeChild(
+                        oldParent.parentNode
+                    )
+                else:
+                    oldParent.parentNode.removeChild(oldParent)
         return cib_dom
     else:
         utils.err("No resources to add.")
@@ -1934,10 +1985,10 @@ def resource_group_list(argv):
         elements = [element]
 
     for e in elements:
-        print e.getAttribute("id") + ":",
+        line_parts = [e.getAttribute("id") + ":"]
         for resource in e.getElementsByTagName("primitive"):
-            print resource.getAttribute("id"),
-        print ""
+            line_parts.append(resource.getAttribute("id"))
+        print(" ".join(line_parts))
 
 def resource_show(argv, stonith=False):
     if "--groups" in utils.pcs_options:
@@ -1955,7 +2006,7 @@ def resource_show(argv, stonith=False):
                 print_node(child,1)
         return
 
-    if len(argv) == 0:    
+    if len(argv) == 0:
         output, retval = utils.run(["crm_mon", "-1", "-r"])
         if retval != 0:
             utils.err("unable to get cluster status from crm_mon\n"+output.rstrip())
@@ -1974,26 +2025,25 @@ def resource_show(argv, stonith=False):
                 elif in_resources:
                     if not has_resources:
                         if not stonith:
-                            print "NO resources configured"
+                            print("NO resources configured")
                         else:
-                            print "NO stonith devices configured"
+                            print("NO stonith devices configured")
                     return
                 continue
             if in_resources:
                 if not preg.match(line) and not stonith:
                     has_resources = True
-                    print line
+                    print(line)
                 elif preg.match(line) and stonith:
                     has_resources = True
-                    print line
+                    print(line)
         return
 
-    preg = re.compile(r'.*xml:\n',re.DOTALL)
     root = utils.get_cib_etree()
     resources = root.find(".//resources")
     resource_found = False
     for arg in argv:
-        for child in resources.findall(".//*"):
+        for child in resources.findall(str(".//*")):
             if "id" in child.attrib and child.attrib["id"] == arg and ((stonith and utils.is_stonith_resource(arg)) or (not stonith and not utils.is_stonith_resource(arg))):
                 print_node(child,1)
                 resource_found = True
@@ -2008,21 +2058,10 @@ def resource_disable(argv):
 
     resource = argv[0]
     if not is_managed(resource):
-        print "Warning: '%s' is unmanaged" % resource
+        print("Warning: '%s' is unmanaged" % resource)
 
     if "--wait" in utils.pcs_options:
-        cib_dom = utils.get_cib_dom()
-        resource_wait = utils.dom_get_clone_ms_resource(cib_dom, resource)
-        if resource_wait is not None and resource_wait.tagName == "primitive":
-            resource_wait = resource_wait.getAttribute("id")
-        else:
-            resource_wait = resource
-        wait = utils.pcs_options["--wait"]
-        if wait is None:
-            wait = utils.get_resource_op_timeout(cib_dom, resource_wait, "stop")
-        elif not wait.isdigit():
-            utils.err("%s is not a valid number of seconds to wait" % wait)
-            sys.exit(1)
+        wait_timeout = utils.validate_wait_get_timeout()
 
     args = ["crm_resource", "-r", argv[0], "-m", "-p", "target-role", "-v", "Stopped"]
     output, retval = utils.run(args)
@@ -2030,57 +2069,98 @@ def resource_disable(argv):
         utils.err(output)
 
     if "--wait" in utils.pcs_options:
-        did_stop, message = utils.is_resource_started(
-            resource_wait, int(wait), True
-        )
-        if did_stop:
-            print message
+        args = ["crm_resource", "--wait"]
+        if wait_timeout:
+            args.extend(["--timeout=%s" % wait_timeout])
+        output, retval = utils.run(args)
+        running_on = utils.resource_running_on(resource)
+        if retval == 0 and not running_on["is_running"]:
+            print(running_on["message"])
             return True
         else:
-            utils.err(
-                "unable to stop: '%s', please check logs for failure "
-                    "information\n%s"
-                % (resource, message)
-            )
+            msg = []
+            if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                msg.append("waiting timeout")
+            else:
+                msg.append(
+                    "unable to stop: '%s', please check logs for failure "
+                    "information"
+                    % resource
+                )
+            msg.append(running_on["message"])
+            if retval != 0 and output:
+                msg.append("\n" + output)
+            utils.err("\n".join(msg).strip())
 
 def resource_enable(argv):
     if len(argv) < 1:
         utils.err("You must specify a resource to enable")
 
     resource = argv[0]
-    if not is_managed(resource):
-        print "Warning: '%s' is unmanaged" % resource
+    cib_dom = utils.get_cib_dom()
+
+    resource_clone = (
+        utils.dom_get_clone(cib_dom, resource)
+        or
+        utils.dom_get_master(cib_dom, resource)
+    )
+    if resource_clone:
+        resource_main = utils.dom_elem_get_clone_ms_resource(resource_clone)
+    else:
+        resource_main = (
+            utils.dom_get_resource(cib_dom, resource)
+            or
+            utils.dom_get_group(cib_dom, resource)
+        )
+        if not resource_main:
+            utils.err(
+                "unable to find a resource/clone/master/group: {0}".format(
+                    resource
+                )
+            )
+        resource_clone = utils.dom_elem_get_resource_clone_ms_parent(
+            resource_main
+        )
+    resources_to_enable = [resource_main.getAttribute("id")]
+    if resource_clone:
+        resources_to_enable.append(resource_clone.getAttribute("id"))
+
+    for res in resources_to_enable:
+        if not is_managed(res):
+            print("Warning: '{0}' is unmanaged".format(res))
 
     if "--wait" in utils.pcs_options:
-        cib_dom = utils.get_cib_dom()
-        resource_wait = utils.dom_get_clone_ms_resource(cib_dom, resource)
-        if resource_wait is not None and resource_wait.tagName == "primitive":
-            resource_wait = resource_wait.getAttribute("id")
-        else:
-            resource_wait = resource
-        wait = utils.pcs_options["--wait"]
-        if wait is None:
-            wait = utils.get_resource_op_timeout(cib_dom, resource_wait, "start")
-        elif not wait.isdigit():
-            utils.err("%s is not a valid number of seconds to wait" % wait)
-            sys.exit(1)
+        wait_timeout = utils.validate_wait_get_timeout()
 
-    args = ["crm_resource", "-r", resource, "-m", "-d", "target-role"]
-    output, retval = utils.run(args)
-    if retval != 0:
-        utils.err (output)
+    for res in resources_to_enable:
+        args = ["crm_resource", "-r", res, "-m", "-d", "target-role"]
+        output, retval = utils.run(args)
+        if retval != 0:
+            utils.err (output)
 
     if "--wait" in utils.pcs_options:
-        did_start, message = utils.is_resource_started(resource_wait, int(wait))
-        if did_start:
-            print message
+        args = ["crm_resource", "--wait"]
+        if wait_timeout:
+            args.extend(["--timeout=%s" % wait_timeout])
+        output, retval = utils.run(args)
+        running_on = utils.resource_running_on(resource)
+        if retval == 0 and running_on["is_running"]:
+            print(running_on["message"])
             return True
         else:
-            utils.err(
-                "unable to start: '%s', please check logs for failure "
-                    "information\n%s"
-                % (resource, message)
-            )
+            msg = []
+            if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                msg.append("waiting timeout")
+            else:
+                msg.append(
+                    "unable to start: '%s', please check logs for failure "
+                    "information"
+                    % resource
+                )
+            msg.append(running_on["message"])
+            if retval != 0 and output:
+                msg.append("\n" + output)
+            utils.err("\n".join(msg).strip())
 
 def resource_restart(argv):
     if len(argv) < 1:
@@ -2092,7 +2172,7 @@ def resource_restart(argv):
 
     real_res = utils.dom_get_resource_clone_ms_parent(dom, resource)
     if real_res:
-        print "Warning: using %s... (if a resource is a clone or master/slave you must use the clone or master/slave name" % real_res.getAttribute("id")
+        print("Warning: using %s... (if a resource is a clone or master/slave you must use the clone or master/slave name" % real_res.getAttribute("id"))
         resource = real_res.getAttribute("id")
 
     args = ["crm_resource", "--restart", "--resource", resource]
@@ -2112,34 +2192,71 @@ def resource_restart(argv):
     if retval != 0:
         utils.err(output)
 
-    print "%s successfully restarted" % resource
+    print("%s successfully restarted" % resource)
 
-def resource_force_start(argv):
+def resource_force_action(action, argv):
     if len(argv) < 1:
-        utils.err("You must specify a resource to debug-start")
+        utils.err("You must specify a resource to {0}".format(action))
+    if len(argv) != 1:
+        usage.resource([action])
+        sys.exit(1)
 
-    resource = argv[0]
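+    # map the pcs debug-* commands to the matching crm_resource --force-* options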
+    action_command = {
+        "debug-start": "--force-start",
+        "debug-stop": "--force-stop",
+        "debug-promote": "--force-promote",
+        "debug-demote": "--force-demote",
+        "debug-monitor": "--force-check",
+    }
 
-    if utils.is_group(resource):
-        group_resources = utils.get_group_children(resource)
-        utils.err("unable to debug-start a group, try one of the group's resource(s) (%s)" % ",".join(group_resources))
+    if action not in action_command:
+        usage.resource(["debug-"])
+        sys.exit(1)
 
+    resource = argv[0]
     dom = utils.get_cib_dom()
 
+    if not utils.dom_get_any_resource(dom, resource):
+        utils.err(
+            "unable to find a resource/clone/master/group: {0}".format(resource)
+        )
+    if utils.dom_get_group(dom, resource):
+        group_resources = utils.get_group_children(resource)
+        utils.err(
+            "unable to {0} a group, try one of the group's resource(s) ({1})".format(
+                action, ",".join(group_resources)
+            )
+        )
     if utils.dom_get_clone(dom, resource):
         clone_resource = utils.dom_get_clone_ms_resource(dom, resource)
-        utils.err("unable to debug-start a clone, try the clone's resource: %s" % clone_resource.getAttribute("id"))
-
+        utils.err(
+            "unable to {0} a clone, try the clone's resource: {1}".format(
+                action, clone_resource.getAttribute("id")
+            )
+        )
     if utils.dom_get_master(dom, resource):
         master_resource = utils.dom_get_clone_ms_resource(dom, resource)
-        utils.err("unable to debug-start a master, try the master's resource: %s" % master_resource.getAttribute("id"))
+        utils.err(
+            "unable to {0} a master, try the master's resource: {1}".format(
+                action, master_resource.getAttribute("id")
+            )
+        )
 
-    args = ["crm_resource", "-r", resource, "--force-start"]
+    args = ["crm_resource", "-r", resource, action_command[action]]
     if "--full" in utils.pcs_options:
-        args = args + ["-V"]
-
+        args.append("-V")
+    if "--force" in utils.pcs_options:
+        args.append("--force")
     output, retval = utils.run(args)
-    print output,
+
+    if "doesn't support group resources" in output:
+        utils.err("groups are not supported")
+        sys.exit(retval)
+    if "doesn't support stonith resources" in output:
+        utils.err("stonith devices are not supported")
+        sys.exit(retval)
+
+    print(output, end="")
     sys.exit(retval)
 
 def resource_manage(argv, set_managed):
@@ -2184,11 +2301,15 @@ def resource_manage(argv, set_managed):
             if retval != 0:
                 utils.err("error attempting to unmanage resource: %s" % output)
         else:
-            xpath = "(//primitive|//group)[@id='"+resource+"']/meta_attributes/nvpair[@name='is-managed']" 
+            # Remove the meta attribute from the id specified (and all children)
+            xpath = "(//primitive|//group|//clone|//master)[@id='"+resource+"']//meta_attributes/nvpair[@name='is-managed']"
+            utils.run(["cibadmin", "-d", "--xpath", xpath, "--force"])
+            # Remove the meta attribute from the parent of the id specified, if the parent is a clone or master
+            xpath = "(//master|//clone)[(group|primitive)[@id='"+resource+"']]/meta_attributes/nvpair[@name='is-managed']"
             utils.run(["cibadmin", "-D", "--xpath", xpath])
             if isGroup:
                 for res in res_to_manage:
-                    xpath = "(//primitive|//group)[@id='"+res+"']/meta_attributes/nvpair[@name='is-managed']" 
+                    xpath = "(//primitive|//group|//clone|//master)[@id='"+res+"']/meta_attributes/nvpair[@name='is-managed']"
                     utils.run(["cibadmin", "-D", "--xpath", xpath])
 
 def is_managed(resource_id):
@@ -2249,29 +2370,29 @@ def resource_failcount(argv):
                         utils.err("Unable to remove failcounts from %s on %s\n" % (resource,ta_node) + output)
                     fail_counts_removed = fail_counts_removed + 1
                 else:
-                    output_dict[ta_node] = " " + ta_node + ": " + nvp.getAttribute("value") + "\n"
+                    output_dict[ta_node] = " " + ta_node + ": " + nvp.getAttribute("value")
                 break
 
     if resource_command == "reset":
         if fail_counts_removed == 0:
-            print "No failcounts needed resetting"
+            print("No failcounts needed resetting")
     if resource_command == "show":
-        output = ""
-        for key in sorted(output_dict.iterkeys()):
-            output += output_dict[key]
+        output = []
+        for key in sorted(output_dict.keys()):
+            output.append(output_dict[key])
 
 
-        if output == "":
+        if not output:
             if all_nodes:
-                print "No failcounts for %s" % resource
+                print("No failcounts for %s" % resource)
             else:
-                print "No failcounts for %s on %s" % (resource,node)
+                print("No failcounts for %s on %s" % (resource,node))
         else:
             if all_nodes:
-                print "Failcounts for %s" % resource
+                print("Failcounts for %s" % resource)
             else:
-                print "Failcounts for %s on %s" % (resource,node)
-            print output,
+                print("Failcounts for %s on %s" % (resource,node))
+            print("\n".join(output))
 
 
 def show_defaults(def_type, indent=""):
@@ -2280,78 +2401,87 @@ def show_defaults(def_type, indent=""):
     if len(defs) > 0:
         defs = defs[0]
     else:
-        print indent + "No defaults set"
+        print(indent + "No defaults set")
         return
 
     foundDefault = False
     for d in defs.getElementsByTagName("nvpair"):
-        print indent + d.getAttribute("name") + ": " + d.getAttribute("value")
+        print(indent + d.getAttribute("name") + ": " + d.getAttribute("value"))
         foundDefault = True
 
     if not foundDefault:
-        print indent + "No defaults set"
+        print(indent + "No defaults set")
 
 def set_default(def_type, argv):
     for arg in argv:
         args = arg.split('=')
         if (len(args) != 2):
-            print "Invalid Property: " + arg
+            print("Invalid Property: " + arg)
             continue
         utils.setAttribute(def_type, args[0], args[1])
 
 def print_node(node, tab = 0):
     spaces = " " * tab
     if node.tag == "group":
-        print spaces + "Group: " + node.attrib["id"] + get_attrs(node,' (',')')
+        print(spaces + "Group: " + node.attrib["id"] + get_attrs(node,' (',')'))
         print_instance_vars_string(node, spaces)
         print_meta_vars_string(node, spaces)
         print_operations(node, spaces)
         for child in node:
             print_node(child, tab + 1)
     if node.tag == "clone":
-        print spaces + "Clone: " + node.attrib["id"] + get_attrs(node,' (',')')
+        print(spaces + "Clone: " + node.attrib["id"] + get_attrs(node,' (',')'))
         print_instance_vars_string(node, spaces)
         print_meta_vars_string(node, spaces)
         print_operations(node, spaces)
         for child in node:
             print_node(child, tab + 1)
     if node.tag == "primitive":
-        print spaces + "Resource: " + node.attrib["id"] + get_attrs(node,' (',')')
+        print(spaces + "Resource: " + node.attrib["id"] + get_attrs(node,' (',')'))
         print_instance_vars_string(node, spaces)
         print_meta_vars_string(node, spaces)
+        print_utilization_string(node, spaces)
         print_operations(node, spaces)
     if node.tag == "master":
-        print spaces + "Master: " + node.attrib["id"] + get_attrs(node, ' (', ')')
+        print(spaces + "Master: " + node.attrib["id"] + get_attrs(node, ' (', ')'))
         print_instance_vars_string(node, spaces)
         print_meta_vars_string(node, spaces)
         print_operations(node, spaces)
         for child in node:
             print_node(child, tab + 1)
 
+def print_utilization_string(element, spaces):
+    output = []
+    mvars = element.findall("utilization/nvpair")
+    for mvar in mvars:
+        output.append(mvar.attrib["name"] + "=" + mvar.attrib["value"])
+    if output:
+        print(spaces + " Utilization: " + " ".join(output))
+
 def print_instance_vars_string(node, spaces):
-    output = ""
-    ivars = node.findall("instance_attributes/nvpair")
+    output = []
+    ivars = node.findall(str("instance_attributes/nvpair"))
     for ivar in ivars:
         name = ivar.attrib["name"]
         value = ivar.attrib["value"]
-        if value.find(" ") != -1:
+        if " " in value:
             value = '"' + value + '"'
-        output += name + "=" + value + " "
-    if output != "":
-        print spaces + " Attributes: " + output
+        output.append(name + "=" + value)
+    if output:
+        print(spaces + " Attributes: " + " ".join(output))
 
 def print_meta_vars_string(node, spaces):
     output = ""
-    mvars = node.findall("meta_attributes/nvpair")
+    mvars = node.findall(str("meta_attributes/nvpair"))
     for mvar in mvars:
         output += mvar.attrib["name"] + "=" + mvar.attrib["value"] + " "
     if output != "":
-        print spaces + " Meta Attrs: " + output
+        print(spaces + " Meta Attrs: " + output)
 
 def print_operations(node, spaces):
     indent = len(spaces) + len(" Operations: ")
     output = ""
-    ops = node.findall("operations/op")
+    ops = node.findall(str("operations/op"))
     first = True
     for op in ops:
         if not first:
@@ -2363,7 +2493,7 @@ def print_operations(node, spaces):
             if attr in ["id","name"] :
                 continue
             output += attr + "=" + val + " "
-        for child in op.findall(".//nvpair"):
+        for child in op.findall(str(".//nvpair")):
             output += child.get("name") + "=" + child.get("value") + " "
 
         output += "(" + op.attrib["id"] + ")"
@@ -2371,7 +2501,7 @@ def print_operations(node, spaces):
 
     output = output.rstrip()
     if output != "":
-        print spaces + " Operations: " + output
+        print(spaces + " Operations: " + output)
 
 def operation_to_string(op_el):
     parts = []
@@ -2403,14 +2533,14 @@ def resource_cleanup(res_id):
     if retval != 0:
         utils.err("Unable to cleanup resource: %s" % res_id + "\n" + output)
     else:
-        print "Resource: %s successfully cleaned up" % res_id
+        print(output)
 
 def resource_cleanup_all():
     (output, retval) = utils.run(["crm_resource", "-C"])
     if retval != 0:
         utils.err("Unexpected error occured. 'crm_resource -C' err_code: %s\n%s" % (retval, output))
     else:
-        print "All resources/stonith devices successfully cleaned up"
+        print(output)
 
 def resource_history(args):
     dom = utils.get_cib_dom()
@@ -2425,16 +2555,244 @@ def resource_history(args):
             resources[res_id][rsc_op.getAttribute("call-id")] = [res_id, rsc_op]
     
     for res in sorted(resources):
-        print "Resource: %s" % res
+        print("Resource: %s" % res)
         for cid in sorted(resources[res]):
             (last_date,retval) = utils.run(["date","-d", "@" + resources[res][cid][1].getAttribute("last-rc-change")])
             last_date = last_date.rstrip()
             rc_code = resources[res][cid][1].getAttribute("rc-code")
             operation = resources[res][cid][1].getAttribute("operation") 
             if rc_code != "0":
-                print "  Failed on %s" % last_date
+                print("  Failed on %s" % last_date)
             elif operation == "stop":
-                print "  Stopped on node xx on %s" % last_date
+                print("  Stopped on node xx on %s" % last_date)
             elif operation == "start":
-                print "  Started on node xx %s" % last_date
+                print("  Started on node xx %s" % last_date)
+
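+# sub-command dispatcher for 'pcs resource relocate'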
+def resource_relocate(argv):
+    if len(argv) < 1:
+        usage.resource(["relocate"])
+        sys.exit(1)
+    cmd = argv.pop(0)
+    if cmd == "show":
+        if argv:
+            usage.resource(["relocate show"])
+            sys.exit(1)
+        resource_relocate_show(utils.get_cib_dom())
+    elif cmd == "dry-run":
+        resource_relocate_run(utils.get_cib_dom(), argv, True)
+    elif cmd == "run":
+        resource_relocate_run(utils.get_cib_dom(), argv, False)
+    elif cmd == "clear":
+        if argv:
+            usage.resource(["relocate clear"])
+            sys.exit(1)
+        utils.replace_cib_configuration(
+            resource_relocate_clear(utils.get_cib_dom())
+        )
+    else:
+        usage.resource(["relocate"])
+        sys.exit(1)
+
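+# return a copy of the CIB with resource-stickiness=0 set on the requested
+# resources (or on all of them) and the set of resource ids that were updated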
+def resource_relocate_set_stickiness(cib_dom, resources=None):
+    resources = [] if resources is None else resources
+    cib_dom = cib_dom.cloneNode(True) # do not change the original cib
+    resources_found = set()
+    updated_resources = set()
+    # set stickiness=0
+    for tagname in ("master", "clone", "group", "primitive"):
+        for res_el in cib_dom.getElementsByTagName(tagname):
+            if resources and res_el.getAttribute("id") not in resources:
+                continue
+            resources_found.add(res_el.getAttribute("id"))
+            res_and_children = (
+                [res_el]
+                +
+                res_el.getElementsByTagName("group")
+                +
+                res_el.getElementsByTagName("primitive")
+            )
+            updated_resources.update(
+                [el.getAttribute("id") for el in res_and_children]
+            )
+            for res_or_child in res_and_children:
+                meta_attributes = utils.dom_prepare_child_element(
+                    res_or_child,
+                    "meta_attributes",
+                    res_or_child.getAttribute("id") + "-"
+                )
+                utils.dom_update_nv_pair(
+                    meta_attributes,
+                    "resource-stickiness",
+                    "0",
+                    meta_attributes.getAttribute("id") + "-"
+                )
+    # resources don't exist
+    if resources:
+        resources_not_found = set(resources) - resources_found
+        if resources_not_found:
+            for res_id in resources_not_found:
+                utils.err(
+                    "unable to find a resource/clone/master/group: {0}".format(
+                        res_id
+                    ),
+                    False
+                )
+            sys.exit(1)
+    return cib_dom, updated_resources
+
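+# simulate the cluster with stickiness disabled to find out where the
+# resources would move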
+def resource_relocate_get_locations(cib_dom, resources=None):
+    resources = [] if resources is None else resources
+    updated_cib, updated_resources = resource_relocate_set_stickiness(
+        cib_dom, resources
+    )
+    simout, transitions, new_cib = utils.simulate_cib(updated_cib)
+    operation_list = utils.get_operations_from_transitions(transitions)
+    locations = utils.get_resources_location_from_operations(
+        new_cib, operation_list
+    )
+    # filter out non-requested resources
+    if not resources:
+        return list(locations.values())
+    return [
+        val for val in locations.values()
+        if val["id"] in updated_resources
+            or val["id_for_constraint"] in updated_resources
+    ]
+
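+# print the simulated cluster status and transition summary with stickiness
+# disabled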
+def resource_relocate_show(cib_dom):
+    updated_cib, updated_resources = resource_relocate_set_stickiness(cib_dom)
+    simout, transitions, new_cib = utils.simulate_cib(updated_cib)
+    in_status = False
+    in_status_resources = False
+    in_transitions = False
+    for line in simout.split("\n"):
+        if line.strip() == "Current cluster status:":
+            in_status = True
+            in_status_resources = False
+            in_transitions = False
+        elif line.strip() == "Transition Summary:":
+            in_status = False
+            in_status_resources = False
+            in_transitions = True
+            print()
+        elif line.strip() == "":
+            if in_status:
+                in_status = False
+                in_status_resources = True
+                in_transitions = False
+            else:
+                in_status = False
+                in_status_resources = False
+                in_transitions = False
+        if in_status or in_status_resources or in_transitions:
+            print(line)
+
+def resource_relocate_location_to_str(location):
+    message = "Creating location constraint: {res} prefers {node}=INFINITY{role}"
+    if "start_on_node" in location:
+        return message.format(
+            res=location["id_for_constraint"], node=location["start_on_node"],
+            role=""
+        )
+    if "promote_on_node" in location:
+        return message.format(
+            res=location["id_for_constraint"], node=location["promote_on_node"],
+            role=" role=Master"
+        )
+    return ""
+
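+# create temporary INFINITY location constraints for the computed target nodes,
+# wait for the cluster to settle and then remove the constraints again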
+def resource_relocate_run(cib_dom, resources=None, dry=True):
+    resources = [] if resources is None else resources
+    error = False
+    anything_changed = False
+    if not dry:
+        utils.check_pacemaker_supports_resource_wait()
+        if utils.usefile:
+            utils.err("This command cannot be used with -f")
+
+    # create constraints
+    cib_dom, constraint_el = constraint.getCurrentConstraints(cib_dom)
+    for location in resource_relocate_get_locations(cib_dom, resources):
+        if not("start_on_node" in location or "promote_on_node" in location):
+            continue
+        anything_changed = True
+        print(resource_relocate_location_to_str(location))
+        constraint_id = utils.find_unique_id(
+            cib_dom,
+            RESOURCE_RELOCATE_CONSTRAINT_PREFIX + location["id_for_constraint"]
+        )
+        new_constraint = cib_dom.createElement("rsc_location")
+        new_constraint.setAttribute("id", constraint_id)
+        new_constraint.setAttribute("rsc", location["id_for_constraint"])
+        new_constraint.setAttribute("score", "INFINITY")
+        if "promote_on_node" in location:
+            new_constraint.setAttribute("node", location["promote_on_node"])
+            new_constraint.setAttribute("role", "Master")
+        elif "start_on_node" in location:
+            new_constraint.setAttribute("node", location["start_on_node"])
+        constraint_el.appendChild(new_constraint)
+    if not anything_changed:
+        return
+    if not dry:
+        utils.replace_cib_configuration(cib_dom)
+
+    # wait for resources to move
+    print()
+    print("Waiting for resources to move...")
+    print()
+    if not dry:
+        output, retval = utils.run(["crm_resource", "--wait"])
+        if retval != 0:
+            error = True
+            if retval == PACEMAKER_WAIT_TIMEOUT_STATUS:
+                utils.err("waiting timeout", False)
+            else:
+                utils.err(output, False)
+
+    # remove constraints
+    resource_relocate_clear(cib_dom)
+    if not dry:
+        utils.replace_cib_configuration(cib_dom)
+
+    if error:
+        sys.exit(1)
+
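+# remove all location constraints created by 'pcs resource relocate run'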
+def resource_relocate_clear(cib_dom):
+    for constraint_el in cib_dom.getElementsByTagName("constraints"):
+        for location_el in constraint_el.getElementsByTagName("rsc_location"):
+            location_id = location_el.getAttribute("id")
+            if location_id.startswith(RESOURCE_RELOCATE_CONSTRAINT_PREFIX):
+                print("Removing constraint {0}".format(location_id))
+                location_el.parentNode.removeChild(location_el)
+    return cib_dom
 
+def set_resource_utilization(resource_id, argv):
+    cib = utils.get_cib_dom()
+    resource_el = utils.dom_get_resource(cib, resource_id)
+    if resource_el is None:
+        utils.err("Unable to find a resource: {0}".format(resource_id))
+
+    utils.dom_update_utilization(resource_el, utils.convert_args_to_tuples(argv))
+    utils.replace_cib_configuration(cib)
+
+def print_resource_utilization(resource_id):
+    cib = utils.get_cib_dom()
+    resource_el = utils.dom_get_resource(cib, resource_id)
+    if resource_el is None:
+        utils.err("Unable to find a resource: {0}".format(resource_id))
+    utilization = utils.get_utilization_str(resource_el)
+
+    print("Resource Utilization:")
+    print(" {0}: {1}".format(resource_id, utilization))
+
+def print_resources_utilization():
+    cib = utils.get_cib_dom()
+    utilization = {}
+    for resource_el in cib.getElementsByTagName("primitive"):
+        u = utils.get_utilization_str(resource_el)
+        if u:
+            utilization[resource_el.getAttribute("id")] = u
+
+    print("Resource Utilization:")
+    for resource in sorted(utilization):
+        print(" {0}: {1}".format(resource, utilization[resource]))
diff --git a/pcs/rule.py b/pcs/rule.py
index e5606db..92407ef 100644
--- a/pcs/rule.py
+++ b/pcs/rule.py
@@ -1,7 +1,14 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import re
 import xml.dom.minidom
+
 import utils
 
+
 # main functions
 
 def parse_argv(argv, extra_options=None):
@@ -34,7 +41,7 @@ def dom_rule_add(dom_element, options, rule_argv):
         utils.err("can not specify both score and score-attribute")
     if options.get("score") and not utils.is_score(options["score"]):
         # preserving legacy behaviour
-        print (
+        print(
             "Warning: invalid score '%s', setting score-attribute=pingd instead"
             % options["score"]
         )
@@ -79,7 +86,7 @@ def dom_rule_add(dom_element, options, rule_argv):
     # add options into rule xml
     if not options.get("score") and not options.get("score-attribute"):
         options["score"] = "INFINITY"
-    for name, value in options.iteritems():
+    for name, value in options.items():
         if name != "id" and value is not None:
             dom_rule.setAttribute(name, value)
     # score or score-attribute is required for the nested rules in order to have
@@ -260,8 +267,9 @@ class ExportAsExpression(object):
 
     def list_attributes(self, element):
         attributes = utils.dom_attrs_to_list(element, with_id=False)
-        if self.normalize:
-            attributes.sort()
+        # sort it always to get the same output for the same input as dict is
+        # unordered
+        attributes.sort()
         return attributes
 
 
@@ -440,7 +448,8 @@ class SymbolTable(object):
         if not self.has_symbol(symbol_id):
             class SymbolClass(superclass):
                 pass
-            SymbolClass.__name__ = "symbol_" + symbol_id
+            # enforce str to be both python2 and python3 compatible
+            SymbolClass.__name__ = str("symbol_" + symbol_id)
             SymbolClass.symbol_id = symbol_id
             SymbolClass.left_binding_power = binding_power
             if expression_func:
@@ -616,8 +625,13 @@ class DateCommonValue(object):
         return False
 
     def __str__(self):
+        # sort it always to get the same output for the same input as dict is
+        # unordered
         return " ".join(
-            ["%s=%s" % (name, value) for name, value in self.parts.iteritems()]
+            [
+                "%s=%s" % (name, value)
+                for name, value in sorted(self.parts.items())
+            ]
         )
 
 
@@ -640,7 +654,7 @@ class DateSpecValue(DateCommonValue):
         super(DateSpecValue, self).__init__(parts_string, self.KEYWORD)
 
     def validate(self):
-        for name, value in self.parts.iteritems():
+        for name, value in self.parts.items():
             if not self.valid_part(name, value):
                 raise SyntaxError(
                     "invalid %s '%s' in '%s'"
@@ -677,7 +691,7 @@ class DateDurationValue(DateCommonValue):
         super(DateDurationValue, self).__init__(parts_string, self.KEYWORD)
 
     def validate(self):
-        for name, value in self.parts.iteritems():
+        for name, value in self.parts.items():
             if not value.isdigit():
                 raise SyntaxError(
                     "invalid %s '%s' in '%s'"
@@ -859,7 +873,7 @@ class CibBuilder(object):
             "date_spec",
             dom_expression.getAttribute("id") + "-datespec"
         )
-        for key, value in syntactic_tree.children[0].value.parts.iteritems():
+        for key, value in syntactic_tree.children[0].value.parts.items():
             dom_datespec.setAttribute(key, value)
 
     def build_expression(self, dom_element, syntactic_tree):
@@ -912,7 +926,7 @@ class CibBuilder(object):
                     dom_expression.getAttribute("id") + "-duration"
                 )
                 duration = syntactic_tree.children[2].children[0].value
-                for key, value in duration.parts.iteritems():
+                for key, value in duration.parts.items():
                     dom_duration.setAttribute(key, value)
             else:
                 dom_expression.setAttribute(
diff --git a/pcs/settings.py b/pcs/settings.py
index cafbac7..4cdc010 100644
--- a/pcs/settings.py
+++ b/pcs/settings.py
@@ -1,5 +1,6 @@
 pacemaker_binaries = "/usr/sbin/"
 corosync_binaries = "/usr/sbin/"
+ccs_binaries = "/usr/sbin/"
 corosync_conf_file = "/etc/corosync/corosync.conf"
 cluster_conf_file = "/etc/cluster/cluster.conf"
 fence_agent_binaries = "/usr/sbin/"
@@ -7,13 +8,16 @@ pengine_binary = "/usr/libexec/pacemaker/pengine"
 crmd_binary = "/usr/libexec/pacemaker/crmd"
 cib_binary = "/usr/libexec/pacemaker/cib"
 stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.139"
+pcs_version = "0.9.148"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
 pcsd_cert_location = "/var/lib/pcsd/pcsd.crt"
 pcsd_key_location = "/var/lib/pcsd/pcsd.key"
+pcsd_tokens_location = "/var/lib/pcsd/tokens"
+pcsd_users_conf_location = "/var/lib/pcsd/pcs_users.conf"
+pcsd_settings_conf_location = "/var/lib/pcsd/pcs_settings.conf"
+pcsd_exec_location = "/usr/lib/pcsd/"
 corosync_uidgid_dir = "/etc/corosync/uidgid.d/"
 cib_dir = "/var/lib/pacemaker/cib/"
 pacemaker_uname = "hacluster"
 pacemaker_gname = "haclient"
-default_wait = 30
diff --git a/pcs/settings.py b/pcs/settings.py.i386-linux-gnu.debian
similarity index 51%
copy from pcs/settings.py
copy to pcs/settings.py.i386-linux-gnu.debian
index cafbac7..c13d92b 100644
--- a/pcs/settings.py
+++ b/pcs/settings.py.i386-linux-gnu.debian
@@ -1,19 +1,23 @@
 pacemaker_binaries = "/usr/sbin/"
 corosync_binaries = "/usr/sbin/"
+ccs_binaries = "/usr/sbin/"
 corosync_conf_file = "/etc/corosync/corosync.conf"
 cluster_conf_file = "/etc/cluster/cluster.conf"
 fence_agent_binaries = "/usr/sbin/"
-pengine_binary = "/usr/libexec/pacemaker/pengine"
-crmd_binary = "/usr/libexec/pacemaker/crmd"
-cib_binary = "/usr/libexec/pacemaker/cib"
-stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.139"
+pengine_binary = "/usr/lib/i386-linux-gnu/pacemaker/pengine"
+crmd_binary = "/usr/lib/i386-linux-gnu/pacemaker/crmd"
+cib_binary = "/usr/lib/i386-linux-gnu/pacemaker/cib"
+stonithd_binary = "/usr/lib/i386-linux-gnu/pacemaker/stonithd"
+pcs_version = "0.9.148"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
 pcsd_cert_location = "/var/lib/pcsd/pcsd.crt"
 pcsd_key_location = "/var/lib/pcsd/pcsd.key"
+pcsd_tokens_location = "/var/lib/pcsd/tokens"
+pcsd_users_conf_location = "/var/lib/pcsd/pcs_users.conf"
+pcsd_settings_conf_location = "/var/lib/pcsd/pcs_settings.conf"
+pcsd_exec_location = "/usr/share/pcsd/"
 corosync_uidgid_dir = "/etc/corosync/uidgid.d/"
 cib_dir = "/var/lib/pacemaker/cib/"
 pacemaker_uname = "hacluster"
 pacemaker_gname = "haclient"
-default_wait = 30
diff --git a/pcs/settings.py b/pcs/settings.py.x86_64-linux-gnu.debian
similarity index 51%
copy from pcs/settings.py
copy to pcs/settings.py.x86_64-linux-gnu.debian
index cafbac7..cf2c256 100644
--- a/pcs/settings.py
+++ b/pcs/settings.py.x86_64-linux-gnu.debian
@@ -1,19 +1,23 @@
 pacemaker_binaries = "/usr/sbin/"
 corosync_binaries = "/usr/sbin/"
+ccs_binaries = "/usr/sbin/"
 corosync_conf_file = "/etc/corosync/corosync.conf"
 cluster_conf_file = "/etc/cluster/cluster.conf"
 fence_agent_binaries = "/usr/sbin/"
-pengine_binary = "/usr/libexec/pacemaker/pengine"
-crmd_binary = "/usr/libexec/pacemaker/crmd"
-cib_binary = "/usr/libexec/pacemaker/cib"
-stonithd_binary = "/usr/libexec/pacemaker/stonithd"
-pcs_version = "0.9.139"
+pengine_binary = "/usr/lib/x86_64-linux-gnu/pacemaker/pengine"
+crmd_binary = "/usr/lib/x86_64-linux-gnu/pacemaker/crmd"
+cib_binary = "/usr/lib/x86_64-linux-gnu/pacemaker/cib"
+stonithd_binary = "/usr/lib/x86_64-linux-gnu/pacemaker/stonithd"
+pcs_version = "0.9.148"
 crm_report = pacemaker_binaries + "crm_report"
 crm_verify = pacemaker_binaries + "crm_verify"
 pcsd_cert_location = "/var/lib/pcsd/pcsd.crt"
 pcsd_key_location = "/var/lib/pcsd/pcsd.key"
+pcsd_tokens_location = "/var/lib/pcsd/tokens"
+pcsd_users_conf_location = "/var/lib/pcsd/pcs_users.conf"
+pcsd_settings_conf_location = "/var/lib/pcsd/pcs_settings.conf"
+pcsd_exec_location = "/usr/share/pcsd/"
 corosync_uidgid_dir = "/etc/corosync/uidgid.d/"
 cib_dir = "/var/lib/pacemaker/cib/"
 pacemaker_uname = "hacluster"
 pacemaker_gname = "haclient"
-default_wait = 30
diff --git a/pcs/status.py b/pcs/status.py
index aa7b929..6c41db5 100644
--- a/pcs/status.py
+++ b/pcs/status.py
@@ -1,12 +1,17 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import sys
-import usage
-import utils
-import xml.dom.minidom
-import re
+import os
+
 import resource
 import cluster
 import settings
-from xml.dom.minidom import parseString
+import usage
+import utils
+
 
 def status_cmd(argv):
     if len(argv) == 0:
@@ -45,37 +50,42 @@ def full_status():
 
     if not utils.usefile or "--corosync_conf" in utils.pcs_options:
         cluster_name = utils.getClusterName()
-        print "Cluster name: %s" % cluster_name
+        print("Cluster name: %s" % cluster_name)
 
     if utils.stonithCheck():
         print("WARNING: no stonith devices and stonith-enabled is not false")
 
-    if utils.corosyncPacemakerNodeCheck():
+    if not utils.is_rhel6() and utils.corosyncPacemakerNodeCheck():
         print("WARNING: corosync and pacemaker node names do not match (IPs used in setup?)")
 
-    print output
+    print(output)
 
     if not utils.usefile:
-        if not utils.is_rhel6():
-            print "PCSD Status:"
-            cluster.cluster_gui_status([],True)
-            print ""
+        print_pcsd_daemon_status()
+        print()
         utils.serviceStatus("  ")
 
 # Parse crm_mon for status
 def nodes_status(argv):
+    if len(argv) == 1 and argv[0] == "pacemaker-id":
+        for node_id, node_name in utils.getPacemakerNodesID().items():
+            print("{0} {1}".format(node_id, node_name))
+        return
+
+    if len(argv) == 1 and argv[0] == "corosync-id":
+        for node_id, node_name in utils.getCorosyncNodesID().items():
+            print("{0} {1}".format(node_id, node_name))
+        return
+
     if len(argv) == 1 and (argv[0] == "config"):
         corosync_nodes = utils.getNodesFromCorosyncConf()
         pacemaker_nodes = utils.getNodesFromPacemaker()
-        print "Corosync Nodes:"
-        print "",
-        for node in corosync_nodes:
-            print node.strip(),
-        print ""
-        print "Pacemaker Nodes:"
-        print "",
-        for node in pacemaker_nodes:
-            print node.strip(),
+        print("Corosync Nodes:")
+        if corosync_nodes:
+            print(" " + " ".join(corosync_nodes))
+        print("Pacemaker Nodes:")
+        if pacemaker_nodes:
+            print(" " + " ".join(pacemaker_nodes))
 
         return
 
@@ -91,15 +101,9 @@ def nodes_status(argv):
 
         online_nodes.sort()
         offline_nodes.sort()
-        print "Corosync Nodes:"
-        print " Online:",
-        for node in online_nodes:
-            print node,
-        print ""
-        print " Offline:",
-        for node in offline_nodes:
-            print node,
-        print ""
+        print("Corosync Nodes:")
+        print(" ".join([" Online:"] + online_nodes))
+        print(" ".join([" Offline:"] + offline_nodes))
         if argv[0] != "both":
             sys.exit(0)
 
@@ -112,37 +116,53 @@ def nodes_status(argv):
     onlinenodes = []
     offlinenodes = []
     standbynodes = []
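+    # track maintenance nodes and pacemaker remote nodes separately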
+    maintenancenodes = []
+    remote_onlinenodes = []
+    remote_offlinenodes = []
+    remote_standbynodes = []
+    remote_maintenancenodes = []
     for node in nodes[0].getElementsByTagName("node"):
+        node_name = node.getAttribute("name")
+        node_remote = node.getAttribute("type") == "remote"
         if node.getAttribute("online") == "true":
             if node.getAttribute("standby") == "true":
-                standbynodes.append(node.getAttribute("name"))
+                if node_remote:
+                    remote_standbynodes.append(node_name)
+                else:
+                    standbynodes.append(node_name)
+            elif node.getAttribute("maintenance") == "true":
+                if node_remote:
+                    remote_maintenancenodes.append(node_name)
+                else:
+                    maintenancenodes.append(node_name)
             else:
-                onlinenodes.append(node.getAttribute("name"))
+                if node_remote:
+                    remote_onlinenodes.append(node_name)
+                else:
+                    onlinenodes.append(node_name)
         else:
-            offlinenodes.append(node.getAttribute("name"))
-
-    print "Pacemaker Nodes:"
-
-    print " Online:",
-    for node in onlinenodes:
-        print node,
-    print ""
+            if node_remote:
+                remote_offlinenodes.append(node_name)
+            else:
+                offlinenodes.append(node_name)
 
-    print " Standby:",
-    for node in standbynodes:
-        print node,
-    print ""
+    print("Pacemaker Nodes:")
+    print(" ".join([" Online:"] + onlinenodes))
+    print(" ".join([" Standby:"] + standbynodes))
+    print(" ".join([" Maintenance:"] + maintenancenodes))
+    print(" ".join([" Offline:"] + offlinenodes))
 
-    print " Offline:",
-    for node in offlinenodes:
-        print node,
-    print ""
+    print("Pacemaker Remote Nodes:")
+    print(" ".join([" Online:"] + remote_onlinenodes))
+    print(" ".join([" Standby:"] + remote_standbynodes))
+    print(" ".join([" Maintenance:"] + remote_maintenancenodes))
+    print(" ".join([" Offline:"] + remote_offlinenodes))
 
 # TODO: Remove, currently unused, we use status from the resource.py
 def resources_status(argv):
     info_dom = utils.getClusterState()
 
-    print "Resources:"
+    print("Resources:")
 
     resources = info_dom.getElementsByTagName("resources")
     if resources.length == 0:
@@ -155,9 +175,9 @@ def resources_status(argv):
             for node in nodes:
                 node_line += node.getAttribute("name") + " "
 
-        print "", resource.getAttribute("id"),
-        print "(" + resource.getAttribute("resource_agent") + ")",
-        print "- " + resource.getAttribute("role") + " " + node_line
+        print("", resource.getAttribute("id"), end=' ')
+        print("(" + resource.getAttribute("resource_agent") + ")", end=' ')
+        print("- " + resource.getAttribute("role") + " " + node_line)
 
 def cluster_status(argv):
     (output, retval) = utils.run(["crm_mon", "-1", "-r"])
@@ -166,29 +186,33 @@ def cluster_status(argv):
         utils.err("cluster is not currently running on this node")
 
     first_empty_line = False
-    print "Cluster Status:"
+    print("Cluster Status:")
     for line in output.splitlines():
         if line == "":
             if first_empty_line:
-                return
+                break
             first_empty_line = True
             continue
         else:
-            print "",line
+            print("",line)
+
+    if not utils.usefile:
+        print()
+        print_pcsd_daemon_status()
 
 def corosync_status():
     (output, retval) = utils.run(["corosync-quorumtool", "-l"])
     if retval != 0:
         utils.err("corosync not running")
     else:
-        print output,
+        print(output, end="")
 
 def xml_status():
     (output, retval) = utils.run(["crm_mon", "-1", "-r", "-X"])
 
     if (retval != 0):
         utils.err("running crm_mon, is pacemaker running?")
-    print output
+    print(output, end="")
 
 def is_cman_running():
     if utils.is_systemctl():
@@ -211,3 +235,19 @@ def is_pacemaker_running():
         output, retval = utils.run(["service", "pacemaker", "status"])
     return retval == 0
 
+def print_pcsd_daemon_status():
+    print("PCSD Status:")
+    if os.getuid() == 0:
+        cluster.cluster_gui_status([], True)
+    else:
+        err_msgs, exitcode, std_out, std_err = utils.call_local_pcsd(
+            ['status', 'pcsd'], True
+        )
+        if err_msgs:
+            for msg in err_msgs:
+                print(msg)
+        if 0 == exitcode:
+            print(std_out)
+        else:
+            print("Unable to get PCSD status")
+
diff --git a/pcs/stonith.py b/pcs/stonith.py
index 1d49ded..d11508a 100644
--- a/pcs/stonith.py
+++ b/pcs/stonith.py
@@ -1,14 +1,16 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import sys
-import resource
-#import sys
-import xml.dom.minidom
-#from xml.dom.minidom import getDOMImplementation
+import re
+import glob
+import xml.parsers.expat  # used to catch ExpatError when parsing agent metadata
 from xml.dom.minidom import parseString
+
 import usage
 import utils
-import re
-import glob
-import os
+import resource
 
 def stonith_cmd(argv):
     if len(argv) == 0:
@@ -26,24 +28,17 @@ def stonith_cmd(argv):
             usage.stonith()
             sys.exit(1)
     elif (sub_cmd == "create"):
-        if len(argv) < 2:
-            usage.stonith()
-            sys.exit(1)
-        stn_id = argv.pop(0)
-        stn_type = "stonith:"+argv.pop(0)
-        st_values, op_values, meta_values = resource.parse_resource_options(
-            argv, with_clone=False
-        )
-        resource.resource_create(stn_id, stn_type, st_values, op_values, meta_values)
+        stonith_create(argv)
     elif (sub_cmd == "update"):
-        stn_id = argv.pop(0)
-        resource.resource_update(stn_id,argv)
+        if len(argv) > 1:
+            stn_id = argv.pop(0)
+            resource.resource_update(stn_id,argv)
+        else:
+            usage.stonith(["update"])
+            sys.exit(1)
     elif (sub_cmd == "delete"):
         if len(argv) == 1:
             stn_id = argv.pop(0)
-            utils.replace_cib_configuration(
-                stonith_level_rm_device(utils.get_cib_dom(), stn_id)
-            )
             resource.resource_remove(stn_id)
         else:
             usage.stonith(["delete"])
@@ -97,19 +92,22 @@ def stonith_list_available(argv):
         if not "--nodesc" in utils.pcs_options:
             metadata = utils.get_stonith_metadata(fd)
             if metadata == False:
-                print >> sys.stderr, "Error: no metadata for %s" % fd
+                utils.err("no metadata for %s" % fd, False)
                 continue
             try:
                 dom = parseString(metadata)
             except Exception:
-                print >> sys.stderr, "Error: unable to parse metadata for fence agent: %s" % (fd_name)
+                utils.err(
+                    "unable to parse metadata for fence agent: %s" % (fd_name),
+                    False
+                )
                 continue
             ra = dom.documentElement
             shortdesc = ra.getAttribute("shortdesc")
 
             if len(shortdesc) > 0:
                 sd = " - " +  resource.format_desc(fd_name.__len__() + 3, shortdesc)
-        print fd_name + sd
+        print(fd_name + sd)
 
 def stonith_list_options(stonith_agent):
     metadata = utils.get_stonith_metadata(utils.fence_bin + stonith_agent)
@@ -135,12 +133,12 @@ def stonith_list_options(stonith_agent):
 
     if short_desc:
         title += " - " + resource.format_desc(len(title + " - "), short_desc)
-    print title
-    print
+    print(title)
+    print()
     if long_desc:
-        print long_desc
-        print
-    print "Stonith options:"
+        print(long_desc)
+        print()
+    print("Stonith options:")
 
     params = dom.documentElement.getElementsByTagName("parameter")
     for param in params:
@@ -150,25 +148,46 @@ def stonith_list_options(stonith_agent):
         desc = ""
         shortdesc_els = param.getElementsByTagName("shortdesc")
         if shortdesc_els and shortdesc_els[0].firstChild:
-            desc = shortdesc_els[0].firstChild.nodeValue.strip().replace("\n", "")
+            desc = shortdesc_els[0].firstChild.nodeValue.strip().replace("\n", " ")
         if not desc:
             desc = "No description available"
         indent = name.__len__() + 4
         desc = resource.format_desc(indent, desc)
-        print "  " + name + ": " + desc
+        print("  " + name + ": " + desc)
 
     default_stonith_options = utils.get_default_stonith_options()
     for do in default_stonith_options:
         name = do.attrib["name"]
         desc = ""
-        if len(do.findall("shortdesc")) > 0:
-            if do.findall("shortdesc")[0].text:
-                desc = do.findall("shortdesc")[0].text.strip()
+        if len(do.findall(str("shortdesc"))) > 0:
+            if do.findall(str("shortdesc"))[0].text:
+                desc = do.findall(str("shortdesc"))[0].text.strip()
         if not desc:
             desc = "No description available"
         indent = len(name) + 4
         desc = resource.format_desc(indent, desc)
-        print "  " + name + ": " + desc
+        print("  " + name + ": " + desc)
+
+def stonith_create(argv):
+    if len(argv) < 2:
+        usage.stonith(["create"])
+        sys.exit(1)
+
+    stonith_id = argv.pop(0)
+    stonith_type = argv.pop(0)
+    st_values, op_values, meta_values = resource.parse_resource_options(
+        argv, with_clone=False
+    )
+    metadata = utils.get_stonith_metadata("/usr/sbin/" + stonith_type)
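+    # agents supporting unfencing get the provides=unfencing meta attribute set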
+    if metadata:
+        if stonith_does_agent_provide_unfencing(metadata):
+            meta_values = [
+                meta for meta in meta_values if not meta.startswith("provides=")
+            ]
+            meta_values.append("provides=unfencing")
+    resource.resource_create(
+        stonith_id, "stonith:" + stonith_type, st_values, op_values, meta_values
+    )
 
 def stonith_level(argv):
     if len(argv) == 0:
@@ -204,13 +223,16 @@ def stonith_level(argv):
     elif subcmd == "verify":
         stonith_level_verify()
     else:
-        print "pcs stonith level: invalid option -- '%s'" % subcmd
+        print("pcs stonith level: invalid option -- '%s'" % subcmd)
         usage.stonith(["level"])
         sys.exit(1)
 
 def stonith_level_add(level, node, devices):
     dom = utils.get_cib_dom()
 
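+    # a fencing level must be a positive integer; leading zeros are stripped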
+    if not re.search(r'^\d+$', level) or re.search(r'^0+$', level):
+        utils.err("invalid level '{0}', use a positive integer".format(level))
+    level = level.lstrip('0')
     if not "--force" in utils.pcs_options:
         for dev in devices.split(","):
             if not utils.is_stonith_resource(dev):
@@ -359,13 +381,12 @@ def stonith_level_show():
     if len(node_levels.keys()) == 0:
         return
 
-    nodes = node_levels.keys()
-    nodes.sort()
+    nodes = sorted(node_levels.keys())
 
     for node in nodes:
-        print " Node: " + node
+        print(" Node: " + node)
         for level in sorted(node_levels[node], key=lambda x: int(x[0])):
-            print "  Level " + level[0] + " - " + level[1]
+            print("  Level " + level[0] + " - " + level[1])
 
 
 def stonith_fence(argv):
@@ -382,7 +403,7 @@ def stonith_fence(argv):
     if retval != 0:
         utils.err("unable to fence '%s'\n" % node + output)
     else:
-        print "Node: %s fenced" % node
+        print("Node: %s fenced" % node)
 
 def stonith_confirm(argv):
     if len(argv) != 1:
@@ -395,4 +416,25 @@ def stonith_confirm(argv):
     if retval != 0:
         utils.err("unable to confirm fencing of node '%s'\n" % node + output)
     else:
-        print "Node: %s confirmed fenced" % node
+        print("Node: %s confirmed fenced" % node)
+
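+# an agent provides unfencing if its metadata advertises an 'on' action with
+# on_target=1 and automatic=1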
+def stonith_does_agent_provide_unfencing(metadata_string):
+    try:
+        dom = parseString(metadata_string)
+        for agent in utils.dom_get_children_by_tag_name(dom, "resource-agent"):
+            for actions in utils.dom_get_children_by_tag_name(agent, "actions"):
+                for action in utils.dom_get_children_by_tag_name(
+                    actions, "action"
+                ):
+                    if (
+                        action.getAttribute("name") == "on"
+                        and
+                        action.getAttribute("on_target") == "1"
+                        and
+                        action.getAttribute("automatic") == "1"
+                    ):
+                        return True
+    except xml.parsers.expat.ExpatError:
+        return False
+    return False
+
diff --git a/pcs/test/.gitignore b/pcs/test/.gitignore
index 8c710cf..2d5046c 100644
--- a/pcs/test/.gitignore
+++ b/pcs/test/.gitignore
@@ -1,2 +1,3 @@
 *.tmp
 temp*.xml
+temp.xml-old
diff --git a/pcs/test/Makefile b/pcs/test/Makefile
index bd2d0c1..4a9bb35 100644
--- a/pcs/test/Makefile
+++ b/pcs/test/Makefile
@@ -1,18 +1,15 @@
-python_version_full := $(wordlist 2,4,$(subst ., ,$(shell python --version 2>&1)))
-python_version_major := $(word 1,${python_version_full})
-python_version_minor := $(word 2,${python_version_full})
-python_version_major_minor := ${python_version_major}.${python_version_minor}
-
-ifneq ($(python_version_major_minor),2.6)
-  pyunit_flags = -f
+ifndef PYTHON
+	PYTHON = python
 endif
 
 test:
-	python test_utils.py ${pyunit_flags}
-	python test_cluster.py ${pyunit_flags}
-	python test_resource.py ${pyunit_flags}
-	python test_rule.py ${pyunit_flags}
-	python test_constraints.py ${pyunit_flags}
-	python test_stonith.py ${pyunit_flags}
-	python test_properties.py ${pyunit_flags}
-	python test_acl.py ${pyunit_flags}
+	$(PYTHON) test_utils.py ${pyunit_flags}
+	$(PYTHON) test_corosync_conf.py ${pyunit_flags}
+	$(PYTHON) test_cluster.py ${pyunit_flags}
+	$(PYTHON) test_resource.py ${pyunit_flags}
+	$(PYTHON) test_rule.py ${pyunit_flags}
+	$(PYTHON) test_constraints.py ${pyunit_flags}
+	$(PYTHON) test_stonith.py ${pyunit_flags}
+	$(PYTHON) test_properties.py ${pyunit_flags}
+	$(PYTHON) test_acl.py ${pyunit_flags}
+	$(PYTHON) test_node.py ${pyunit_flags}
diff --git a/pcs/test/corosync.conf b/pcs/test/corosync.conf
index 8466744..636d716 100644
--- a/pcs/test/corosync.conf
+++ b/pcs/test/corosync.conf
@@ -1,25 +1,26 @@
 totem {
-version: 2
-secauth: off
-cluster_name: test99
-transport: udpu
+    version: 2
+    secauth: off
+    cluster_name: test99
+    transport: udpu
 }
 
 nodelist {
-  node {
+    node {
         ring0_addr: rh7-1
         nodeid: 1
-       }
-  node {
+    }
+
+    node {
         ring0_addr: rh7-2
         nodeid: 2
-       }
+    }
 }
 
 quorum {
-provider: corosync_votequorum
+    provider: corosync_votequorum
 }
 
 logging {
-to_syslog: yes
+    to_syslog: yes
 }
diff --git a/pcs/test/corosync.conf.orig b/pcs/test/corosync.conf.orig
index 8466744..636d716 100644
--- a/pcs/test/corosync.conf.orig
+++ b/pcs/test/corosync.conf.orig
@@ -1,25 +1,26 @@
 totem {
-version: 2
-secauth: off
-cluster_name: test99
-transport: udpu
+    version: 2
+    secauth: off
+    cluster_name: test99
+    transport: udpu
 }
 
 nodelist {
-  node {
+    node {
         ring0_addr: rh7-1
         nodeid: 1
-       }
-  node {
+    }
+
+    node {
         ring0_addr: rh7-2
         nodeid: 2
-       }
+    }
 }
 
 quorum {
-provider: corosync_votequorum
+    provider: corosync_votequorum
 }
 
 logging {
-to_syslog: yes
+    to_syslog: yes
 }
diff --git a/pcs/test/pcs_test_functions.py b/pcs/test/pcs_test_functions.py
index 787d177..9722e2f 100644
--- a/pcs/test/pcs_test_functions.py
+++ b/pcs/test/pcs_test_functions.py
@@ -1,11 +1,19 @@
-import os,sys
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os.path
+import sys
 import difflib
 import subprocess
 import re
 parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0,parentdir) 
+sys.path.insert(0,parentdir)
+
 import utils
 
+
 pcs_location = "../pcs.py"
 
 # Run pcs with -f on specified file
@@ -39,14 +47,13 @@ def ac(a,b):
     if a != b:
         d = difflib.Differ()
         diff = d.compare(b.splitlines(1),a.splitlines(1))
-        print ""
-        print "".join(diff)
+        print("")
+        print("".join(diff))
         assert False,[a]
 
 def isMinimumPacemakerVersion(cmajor,cminor,crev):
-    p = subprocess.Popen(["crm_mon","--version"], stdout=subprocess.PIPE)
-    (stdout, stderr) = p.communicate()
-    pacemaker_version =  stdout.split("\n")[0]
+    output, retval = utils.run(["crm_mon", "--version"])
+    pacemaker_version = output.split("\n")[0]
     r = re.compile(r"Pacemaker (\d+)\.(\d+)\.(\d+)")
     m = r.match(pacemaker_version)
     major = int(m.group(1))
diff --git a/pcs/test/test.py b/pcs/test/test.py
index 0a043a6..0e9b980 100644
--- a/pcs/test/test.py
+++ b/pcs/test/test.py
@@ -3,17 +3,17 @@ import xml.etree.ElementTree as ET
 
 tree = ET.parse('temp.xml')
 root = tree.getroot()
-print type(tree)
-print type(root)
+print(type(tree))
+print(type(root))
 
 if type(tree) == ET.ElementTree:
-  print "ELEMENT"
+  print("ELEMENT")
 else:
-  print "FAIL"
+  print("FAIL")
 
 check_id = "D4"
-print root.find(".//primitive[id=D1]")
-print root.find(".//primitive[@id='"+check_id+"']")
+print(root.find(".//primitive[id=D1]"))
+print(root.find(".//primitive[@id='"+check_id+"']"))
 
 #for z in root.findall(".//*"):
 #  print z
diff --git a/pcs/test/test_acl.py b/pcs/test/test_acl.py
index 36ba436..c8677b2 100644
--- a/pcs/test/test_acl.py
+++ b/pcs/test/test_acl.py
@@ -1,10 +1,18 @@
-import os,sys
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os.path
+import sys
 import shutil
 import unittest
 parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0,parentdir) 
+sys.path.insert(0,parentdir)
+
 import utils
-from pcs_test_functions import pcs,ac,isMinimumPacemakerVersion
+from pcs_test_functions import pcs, ac, isMinimumPacemakerVersion
+
 
 old_cib = "empty.xml"
 empty_cib = "empty-1.2.xml"
@@ -75,6 +83,38 @@ class ACLTest(unittest.TestCase):
         assert r == 0
         ac(o,"")
 
+        o, r = pcs("acl user create user1 roleX")
+        ac(o, "Error: cannot find acl role: roleX\n")
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl user create user1 role1 roleX")
+        ac(o, "Error: cannot find acl role: roleX\n")
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl group create group1 roleX")
+        ac(o, "Error: cannot find acl role: roleX\n")
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl group create group1 role1 roleX")
+        ac(o, "Error: cannot find acl role: roleX\n")
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl")
+        ac(o, """\
+ACLs are disabled, run 'pcs acl enable' to enable
+
+Role: role1
+  Permission: read xpath /xpath1/ (role1-read)
+  Permission: write xpath /xpath2/ (role1-write)
+Role: role2
+  Permission: deny xpath /xpath3/ (role2-deny)
+  Permission: deny xpath /xpath4/ (role2-deny-1)
+Role: role3
+  Permission: read xpath /xpath5/ (role3-read)
+  Permission: read xpath /xpath6/ (role3-read-1)
+""")
+        self.assertEqual(0, r)
+
         o,r = pcs("acl user create user1 role1 role2")
         assert r == 0
         ac(o,"")
@@ -164,16 +204,45 @@ class ACLTest(unittest.TestCase):
         ac(o,"")
 
         o,r = pcs("acl")
+        ac(o, """\
+ACLs are disabled, run 'pcs acl enable' to enable
+
+User: user1
+  Roles:
+Group: group1
+  Roles: role1 role3
+Role: role1
+  Permission: read xpath /xpath1/ (role1-read)
+  Permission: write xpath /xpath2/ (role1-write)
+Role: role2
+  Permission: deny xpath /xpath3/ (role2-deny)
+  Permission: deny xpath /xpath4/ (role2-deny-1)
+Role: role3
+  Permission: read xpath /xpath5/ (role3-read)
+  Permission: read xpath /xpath6/ (role3-read-1)
+""")
         assert r == 0
-        ac(o,"ACLs are disabled, run 'pcs acl enable' to enable\n\nUser: user1\n  Roles: \nGroup: group1\n  Roles: role1 role3\nRole: role1\n  Permission: read xpath /xpath1/ (role1-read)\n  Permission: write xpath /xpath2/ (role1-write)\nRole: role2\n  Permission: deny xpath /xpath3/ (role2-deny)\n  Permission: deny xpath /xpath4/ (role2-deny-1)\nRole: role3\n  Permission: read xpath /xpath5/ (role3-read)\n  Permission: read xpath /xpath6/ (role3-read-1)\n")
 
         o,r = pcs("acl role delete role3")
         assert r == 0
         ac(o,"")
 
         o,r = pcs("acl")
+        ac(o, """\
+ACLs are disabled, run 'pcs acl enable' to enable
+
+User: user1
+  Roles:
+Group: group1
+  Roles: role1
+Role: role1
+  Permission: read xpath /xpath1/ (role1-read)
+  Permission: write xpath /xpath2/ (role1-write)
+Role: role2
+  Permission: deny xpath /xpath3/ (role2-deny)
+  Permission: deny xpath /xpath4/ (role2-deny-1)
+""")
         assert r == 0
-        ac(o,"ACLs are disabled, run 'pcs acl enable' to enable\n\nUser: user1\n  Roles: \nGroup: group1\n  Roles: role1\nRole: role1\n  Permission: read xpath /xpath1/ (role1-read)\n  Permission: write xpath /xpath2/ (role1-write)\nRole: role2\n  Permission: deny xpath /xpath3/ (role2-deny)\n  Permission: deny xpath /xpath4/ (role2-deny-1)\n")
 
         o,r = pcs("acl role assign role2 to user1")
         assert r == 0
@@ -311,40 +380,83 @@ Role: role2
         ac(o,"Error: group group1 already exists\n")
 
         o,r = pcs("acl")
+        ac(o,"""\
+ACLs are disabled, run 'pcs acl enable' to enable
+
+User: user1
+  Roles:
+User: user2
+  Roles:
+Group: group1
+  Roles:
+Group: group2
+  Roles:
+""")
         assert r == 0
-        ac(o,"ACLs are disabled, run 'pcs acl enable' to enable\n\nUser: user1\n  Roles: \nUser: user2\n  Roles: \nGroup: group1\n  Roles: \nGroup: group2\n  Roles: \n")
 
         o,r = pcs("acl group delete user1")
         assert r == 1
         ac(o,"Error: unable to find acl group: user1\n")
 
         o,r = pcs("acl")
+        ac(o, """\
+ACLs are disabled, run 'pcs acl enable' to enable
+
+User: user1
+  Roles:
+User: user2
+  Roles:
+Group: group1
+  Roles:
+Group: group2
+  Roles:
+""")
         assert r == 0
-        ac(o,"ACLs are disabled, run 'pcs acl enable' to enable\n\nUser: user1\n  Roles: \nUser: user2\n  Roles: \nGroup: group1\n  Roles: \nGroup: group2\n  Roles: \n")
 
         o,r = pcs("acl group delete group2")
         ac(o,"")
         assert r == 0
 
         o,r = pcs("acl")
+        ac(o, """\
+ACLs are disabled, run 'pcs acl enable' to enable
+
+User: user1
+  Roles:
+User: user2
+  Roles:
+Group: group1
+  Roles:
+""")
         assert r == 0
-        ac(o,"ACLs are disabled, run 'pcs acl enable' to enable\n\nUser: user1\n  Roles: \nUser: user2\n  Roles: \nGroup: group1\n  Roles: \n")
 
         o,r = pcs("acl group delete group1")
         ac(o,"")
         assert r == 0
 
         o,r = pcs("acl")
+        ac(o, """\
+ACLs are disabled, run 'pcs acl enable' to enable
+
+User: user1
+  Roles:
+User: user2
+  Roles:
+""")
         assert r == 0
-        ac(o,"ACLs are disabled, run 'pcs acl enable' to enable\n\nUser: user1\n  Roles: \nUser: user2\n  Roles: \n")
 
         o,r = pcs("acl user delete user1")
         ac(o,"")
         assert r == 0
 
         o,r = pcs("acl")
+        ac(o, """\
+ACLs are disabled, run 'pcs acl enable' to enable
+
+User: user2
+  Roles:
+""")
         assert r == 0
-        ac(o,"ACLs are disabled, run 'pcs acl enable' to enable\n\nUser: user2\n  Roles: \n")
 
         o,r = pcs("acl user delete user2")
         ac(o,"")
@@ -355,6 +467,86 @@ Role: role2
         ac(o,"ACLs are disabled, run 'pcs acl enable' to enable\n\n")
 
     def testRoleCreateDelete(self):
+        o, r = pcs("acl role create role0 read")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 read //resources")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 read xpath")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 read id")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 readX xpath //resources")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 read xpathX //resources")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 description=test read")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 description=test read //resources")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 description=test read xpath")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 description=test read id")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs(
+            "acl role create role0 description=test readX xpath //resources"
+        )
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs(
+            "acl role create role0 description=test read xpathX //resources"
+        )
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 desc=test read")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 desc=test read //resources")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 desc=test read xpath")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 desc=test read id")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 desc=test readX xpath //resources")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl role create role0 desc=test read xpathX //resources")
+        self.assertTrue(o.startswith("\nUsage: pcs acl role create..."))
+        self.assertEqual(1, r)
+
+        o,r = pcs("acl")
+        ac(o, "ACLs are disabled, run 'pcs acl enable' to enable\n\n")
+        self.assertEqual(0, r)
+
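
    All of the usage errors exercised above come down to one rule: after an
    optional description=<text> pair (desc= is not recognised), permissions
    must be supplied as complete triples of kind, scope type and target. A
    rough sketch of that rule with hypothetical names, not the project's
    code -- the real parsing lives in pcs/acl.py:

        # Hypothetical helper illustrating the argument rule the usage
        # errors above exercise.
        def parse_permissions(argv):
            """Split argv into (kind, scope_type, target) triples or fail."""
            if len(argv) % 3 != 0:
                raise ValueError("usage")  # e.g. "read", "read //resources"
            triples = []
            for kind, scope, target in zip(argv[0::3], argv[1::3], argv[2::3]):
                if kind.lower() not in ("read", "write", "deny"):
                    raise ValueError("usage")  # e.g. "readX"
                if scope.lower() not in ("xpath", "id"):
                    raise ValueError("usage")  # e.g. "xpathX"
                triples.append((kind.lower(), scope.lower(), target))
            return triples

    Note the lower-casing: the cases further down show that READ/XPATH and
    other case variants are accepted.
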
         o,r = pcs("acl role create role0")
         ac(o,"")
         assert r == 0
@@ -371,11 +563,11 @@ Role: role2
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("acl role create role2 description='with description' read xpath /xpath/")
+        o,r = pcs("acl role create role2 description='with description' READ XPATH /xpath/")
         assert r == 0
         ac(o,"")
 
-        o,r = pcs("acl role create role3 read xpath /xpath_query/ write xpath /xpath_query2/ deny xpath /xpath_query3/")
+        o,r = pcs("acl role create role3 Read XPath /xpath_query/ wRiTe xpATH /xpath_query2/ deny xpath /xpath_query3/")
         assert r == 0
         ac(o,"")
 
@@ -468,8 +660,103 @@ Role: role2
         ac(o,"ACLs are disabled, run 'pcs acl enable' to enable\n\nRole: role1\n  Permission: read xpath /xpath1/ (role1-read)\n  Permission: write xpath /xpath2/ (role1-write)\n  Permission: deny xpath /myxpath1/ (role1-deny)\nRole: role2\n  Permission: read xpath /xpath3/ (role2-read)\n  Permission: write xpath /xpath4/ (role2-write)\nRole: role3\nRole: role4\n")
         assert r == 0
 
+        o, r = pcs("acl permission delete role1-read")
+        ac(o, "")
+        self.assertEqual(0, r)
+
+        o, r = pcs("acl permission delete role1-write")
+        ac(o, "")
+        self.assertEqual(0, r)
+
+        o, r = pcs("acl permission delete role1-deny")
+        ac(o, "")
+        self.assertEqual(0, r)
+
+        o, r = pcs("acl permission delete role2-read")
+        ac(o, "")
+        self.assertEqual(0, r)
+
+        o, r = pcs("acl permission delete role2-write")
+        ac(o, "")
+        self.assertEqual(0, r)
+
+        o, r = pcs("acl")
+        ac(o, """\
+ACLs are disabled, run 'pcs acl enable' to enable
+
+Role: role1
+Role: role2
+Role: role3
+Role: role4
+""")
+        self.assertEqual(0, r)
+
+        o, r = pcs("acl permission add role1 read")
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl permission add role1 read //resources")
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl permission add role1 read xpath")
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl permission add role1 read id")
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl permission add role1 readX xpath //resources")
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl permission add role1 read xpathX //resources")
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl permission add role1 read id dummy read")
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl permission add role1 read id dummy read //resources")
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl permission add role1 read id dummy read xpath")
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl permission add role1 read id dummy read id")
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs(
+            "acl permission add role1 read id dummy readX xpath //resources"
+        )
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs(
+            "acl permission add role1 read id dummy read xpathX //resources"
+        )
+        self.assertTrue(o.startswith("\nUsage: pcs acl permission add..."))
+        self.assertEqual(1, r)
+
+        o, r = pcs("acl")
+        ac(o, """\
+ACLs are disabled, run 'pcs acl enable' to enable
+
+Role: role1
+Role: role2
+Role: role3
+Role: role4
+""")
+        self.assertEqual(0, r)
+
+
 if __name__ == "__main__":
     if isMinimumPacemakerVersion(1,1,11):
         unittest.main()
     else:
-        print "WARNING: Pacemaker version is too old (must be >= 1.1.11) to test acls"
+        print("WARNING: Pacemaker version is too old (must be >= 1.1.11) to test acls")
diff --git a/pcs/test/test_cluster.py b/pcs/test/test_cluster.py
index 647ec30..805fc80 100644
--- a/pcs/test/test_cluster.py
+++ b/pcs/test/test_cluster.py
@@ -1,10 +1,18 @@
-import os,sys
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import sys
 import shutil
 import unittest
 parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0,parentdir) 
+sys.path.insert(0, parentdir)
+
 import utils
-from pcs_test_functions import pcs,ac,isMinimumPacemakerVersion
+from pcs_test_functions import pcs, ac, isMinimumPacemakerVersion
+
 
 empty_cib = "empty-withnodes.xml"
 temp_cib = "temp.xml"
@@ -12,16 +20,38 @@ temp_cib = "temp.xml"
 class ClusterTest(unittest.TestCase):
     def setUp(self):
         shutil.copy(empty_cib, temp_cib)
+        if os.path.exists("corosync.conf.tmp"):
+            os.unlink("corosync.conf.tmp")
+        if os.path.exists("cluster.conf.tmp"):
+            os.unlink("cluster.conf.tmp")
 
     def testNodeStandby(self):
         output, returnVal = pcs(temp_cib, "cluster standby rh7-1") 
         ac(output, "")
         assert returnVal == 0
 
+        # try to standby a node which is already in standby mode
+        output, returnVal = pcs(temp_cib, "cluster standby rh7-1")
+        ac(output, "")
+        assert returnVal == 0
+
+        output, returnVal = pcs(temp_cib, "cluster unstandby rh7-1")
+        ac(output, "")
+        assert returnVal == 0
+
+        # try to unstandby a node which is not in standby mode
+        output, returnVal = pcs(temp_cib, "cluster unstandby rh7-1")
+        ac(output, "")
+        assert returnVal == 0
+
         output, returnVal = pcs(temp_cib, "cluster standby nonexistant-node") 
         assert returnVal == 1
         assert output == "Error: node 'nonexistant-node' does not appear to exist in configuration\n"
 
+        output, returnVal = pcs(temp_cib, "cluster unstandby nonexistant-node")
+        assert returnVal == 1
+        assert output == "Error: node 'nonexistant-node' does not appear to exist in configuration\n"
+
     def testRemoteNode(self):
         o,r = pcs(temp_cib, "resource create D1 Dummy --no-default-ops")
         assert r==0 and o==""
@@ -67,117 +97,734 @@ class ClusterTest(unittest.TestCase):
         assert r==0
         ac(o," Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (D1-monitor-interval-60s)\n Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (D2-monitor-interval-60s)\n")
 
-    def testCreation(self):
-        if utils.is_rhel6():
-            return
+    def test_cluster_setup_bad_args(self):
+        output, returnVal = pcs(temp_cib, "cluster setup")
+        self.assertEqual(
+            "Error: A cluster name (--name <name>) is required to setup a cluster\n",
+            output
+        )
+        self.assertEqual(1, returnVal)
 
-        output, returnVal = pcs(temp_cib, "cluster") 
-        assert returnVal == 1
-        assert output.startswith("\nUsage: pcs cluster [commands]...")
+        output, returnVal = pcs(temp_cib, "cluster setup --name cname")
+        self.assertTrue(output.startswith("\nUsage: pcs cluster setup..."))
+        self.assertEqual(1, returnVal)
 
-        output, returnVal = pcs(temp_cib, "cluster setup --local --corosync_conf=corosync.conf.tmp cname rh7-1 rh7-2")
-        assert returnVal == 1
-        assert output.startswith("Error: A cluster name (--name <name>) is required to setup a cluster\n")
+        output, returnVal = pcs(temp_cib, "cluster setup cname rh7-1 rh7-2")
+        self.assertEqual(
+            "Error: A cluster name (--name <name>) is required to setup a cluster\n",
+            output
+        )
+        self.assertEqual(1, returnVal)
 
-# Setup a 2 node cluster and make sure the two node config is set, then add a
-# node and make sure that it's unset, then remove a node and make sure it's
-# set again
-        output, returnVal = pcs(temp_cib, "cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2")
-        ac (output,"")
-        assert returnVal == 0
+    def test_cluster_setup_hostnames_resolving(self):
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --corosync_conf=corosync.conf.tmp --cluster_conf=cluster.conf.tmp --name cname nonexistant-address"
+        )
+        ac(output, """\
+Error: Unable to resolve all hostnames, use --force to override
+Warning: Unable to resolve hostname: nonexistant-address
+""")
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --corosync_conf=corosync.conf.tmp --cluster_conf=cluster.conf.tmp --name cname nonexistant-address --force"
+        )
+        ac(output, """\
+Warning: Unable to resolve hostname: nonexistant-address
+""")
+        self.assertEqual(0, returnVal)
+
+    def test_cluster_setup_file_exists(self):
+        if utils.is_rhel6():
+            return
 
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2"
+        )
+        self.assertEqual("", output)
+        self.assertEqual(0, returnVal)
+        corosync_conf = """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+"""
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udpu\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\ntwo_node: 1\n}\n\nlogging {\nto_syslog: yes\n}\n')
+            ac(data, corosync_conf)
 
-        output, returnVal = pcs(temp_cib, "cluster localnode add --corosync_conf=corosync.conf.tmp rh7-3")
-        ac(output,"rh7-3: successfully added!\n")
-        assert returnVal == 0
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-2 rh7-3"
+        )
+        self.assertEqual("""\
+Error: corosync.conf.tmp already exists, use --force to overwrite
+""",
+            output
+        )
+        self.assertEqual(1, returnVal)
+        with open("corosync.conf.tmp") as f:
+            data = f.read()
+            ac(data, corosync_conf)
 
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-2 rh7-3"
+        )
+        self.assertEqual("", output)
+        self.assertEqual(0, returnVal)
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udpu\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n  node {\n        ring0_addr: rh7-3\n        nodeid: 3\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\n}\n\nlogging {\nto_syslog: yes\n}\n')
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-2
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-3
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-        output, returnVal = pcs(temp_cib, "cluster localnode remove --corosync_conf=corosync.conf.tmp rh7-3")
-        assert returnVal == 0
-        assert output == "rh7-3: successfully removed!\n",output
+    def test_cluster_setup_file_exists_rhel6(self):
+        if not utils.is_rhel6():
+            return
+
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2"
+        )
+        self.assertEqual("", output)
+        self.assertEqual(0, returnVal)
+        cluster_conf = """\
+<cluster config_version="9" name="cname">
+  <fence_daemon/>
+  <clusternodes>
+    <clusternode name="rh7-1" nodeid="1">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-1"/>
+        </method>
+      </fence>
+    </clusternode>
+    <clusternode name="rh7-2" nodeid="2">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-2"/>
+        </method>
+      </fence>
+    </clusternode>
+  </clusternodes>
+  <cman broadcast="no" expected_votes="1" transport="udp" two_node="1"/>
+  <fencedevices>
+    <fencedevice agent="fence_pcmk" name="pcmk-redirect"/>
+  </fencedevices>
+  <rm>
+    <failoverdomains/>
+    <resources/>
+  </rm>
+</cluster>
+"""
+        with open("cluster.conf.tmp") as f:
+            data = f.read()
+            ac(data, cluster_conf)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-2 rh7-3"
+        )
+        self.assertEqual("""\
+Error: cluster.conf.tmp already exists, use --force to overwrite
+""",
+            output
+        )
+        self.assertEqual(1, returnVal)
+        with open("cluster.conf.tmp") as f:
+            data = f.read()
+            ac(data, cluster_conf)
 
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-2 rh7-3"
+        )
+        self.assertEqual("", output)
+        self.assertEqual(0, returnVal)
+        with open("cluster.conf.tmp") as f:
+            data = f.read()
+            ac(data, """\
+<cluster config_version="9" name="cname">
+  <fence_daemon/>
+  <clusternodes>
+    <clusternode name="rh7-2" nodeid="1">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-2"/>
+        </method>
+      </fence>
+    </clusternode>
+    <clusternode name="rh7-3" nodeid="2">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-3"/>
+        </method>
+      </fence>
+    </clusternode>
+  </clusternodes>
+  <cman broadcast="no" expected_votes="1" transport="udp" two_node="1"/>
+  <fencedevices>
+    <fencedevice agent="fence_pcmk" name="pcmk-redirect"/>
+  </fencedevices>
+  <rm>
+    <failoverdomains/>
+    <resources/>
+  </rm>
+</cluster>
+""")
+
+    def test_cluster_setup_2_nodes_no_atb(self):
+        # Setup a 2 node cluster and make sure the two node config is set, then
+        # add a node and make sure that it's unset, then remove a node and make
+        # sure it's set again.
+        if utils.is_rhel6():
+            return
+
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2"
+        )
+        self.assertEqual("", output)
+        self.assertEqual(0, returnVal)
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            assert data == 'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udpu\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\ntwo_node: 1\n}\n\nlogging {\nto_syslog: yes\n}\n',[data]
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-        o,r = pcs(temp_cib, "cluster localnode add --corosync_conf=corosync.conf.tmp rh7-3,192.168.1.3")
-        assert r == 0
-        assert o == "rh7-3,192.168.1.3: successfully added!\n",[o]
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster localnode add --corosync_conf=corosync.conf.tmp rh7-3"
+        )
+        self.assertEqual("rh7-3: successfully added!\n", output)
+        self.assertEqual(0, returnVal)
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udpu\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n  node {\n        ring0_addr: rh7-3\n        ring1_addr: 192.168.1.3\n        nodeid: 3\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\n}\n\nlogging {\nto_syslog: yes\n}\n')
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+
+    node {
+        ring0_addr: rh7-3
+        nodeid: 3
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-        o,r = pcs(temp_cib, "cluster localnode remove --corosync_conf=corosync.conf.tmp rh7-2")
-        assert r == 0
-        assert o == "rh7-2: successfully removed!\n",[o]
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster localnode remove --corosync_conf=corosync.conf.tmp rh7-3"
+        )
+        self.assertEqual(0, returnVal)
+        self.assertEqual("rh7-3: successfully removed!\n", output)
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udpu\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-3\n        ring1_addr: 192.168.1.3\n        nodeid: 3\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\ntwo_node: 1\n}\n\nlogging {\nto_syslog: yes\n}\n')
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-        o,r = pcs(temp_cib, "cluster localnode remove --corosync_conf=corosync.conf.tmp rh7-3,192.168.1.3")
-        assert r == 0
-        assert o == "rh7-3,192.168.1.3: successfully removed!\n",[o]
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster localnode add --corosync_conf=corosync.conf.tmp rh7-3,192.168.1.3"
+        )
+        self.assertEqual("rh7-3,192.168.1.3: successfully added!\n", output)
+        self.assertEqual(0, returnVal)
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udpu\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\ntwo_node: 1\n}\n\nlogging {\nto_syslog: yes\n}\n')
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+
+    node {
+        ring0_addr: rh7-3
+        ring1_addr: 192.168.1.3
+        nodeid: 3
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-        output, returnVal = pcs(temp_cib, "cluster setup --force --local --corosync_conf=corosync.conf2.tmp --name cname rh7-1 rh7-2 rh7-3")
-        ac(output,"")
-        assert returnVal == 0
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster localnode remove --corosync_conf=corosync.conf.tmp rh7-2"
+        )
+        self.assertEqual(0, returnVal)
+        self.assertEqual("rh7-2: successfully removed!\n", output)
+        with open("corosync.conf.tmp") as f:
+            data = f.read()
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-3
+        ring1_addr: 192.168.1.3
+        nodeid: 3
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-        with open("corosync.conf2.tmp") as f:
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster localnode remove --corosync_conf=corosync.conf.tmp rh7-3,192.168.1.3"
+        )
+        self.assertEqual(0, returnVal)
+        self.assertEqual("rh7-3,192.168.1.3: successfully removed!\n", output)
+        with open("corosync.conf.tmp") as f:
             data = f.read()
-            assert data == 'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udpu\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n  node {\n        ring0_addr: rh7-3\n        nodeid: 3\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\n\n}\n\nlogging {\nto_syslog: yes\n}\n',[data]
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-## Test to make transport is set
-        output, returnVal = pcs(temp_cib, "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --transport udp")
-        ac(output,"Error: corosync.conf.tmp already exists, use --force to overwrite\n")
-        assert returnVal == 1
+    def test_cluster_setup_2_nodes_with_atb(self):
+        # Setup a 2 node cluster with auto_tie_breaker and make sure the two
+        # node config is NOT set, then add a node, then remove a node and make
+        # sure it is still NOT set.
+        if utils.is_rhel6():
+            return
 
-        output, returnVal = pcs(temp_cib, "cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --transport udp")
-        ac(output,"")
-        assert returnVal == 0
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --auto_tie_breaker=1"
+        )
+        self.assertEqual("", output)
+        self.assertEqual(0, returnVal)
+        with open("corosync.conf.tmp") as f:
+            data = f.read()
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    auto_tie_breaker: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster localnode add --corosync_conf=corosync.conf.tmp rh7-3"
+        )
+        self.assertEqual(output, "rh7-3: successfully added!\n")
+        self.assertEqual(0, returnVal)
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udp\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\ntwo_node: 1\n}\n\nlogging {\nto_syslog: yes\n}\n')
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+
+    node {
+        ring0_addr: rh7-3
+        nodeid: 3
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    auto_tie_breaker: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-    def testCreationRhel6(self):
-        if not utils.is_rhel6():
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster localnode remove --corosync_conf=corosync.conf.tmp rh7-3"
+        )
+        self.assertEqual("rh7-3: successfully removed!\n", output)
+        self.assertEqual(0, returnVal)
+        with open("corosync.conf.tmp") as f:
+            data = f.read()
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    auto_tie_breaker: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
+
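
    Taken together, the two tests above pin down when pcs writes two_node
    into the quorum section: only for exactly two nodes, and only when
    auto_tie_breaker has not been requested. A toy restatement of that rule
    (hypothetical names; the real logic lives in pcs/cluster.py):

        def quorum_section(node_count, auto_tie_breaker=False):
            """Quorum options the tests above expect in the generated corosync.conf."""
            options = {"provider": "corosync_votequorum"}
            if auto_tie_breaker:
                # auto_tie_breaker is kept through node add/remove and
                # two_node is never written alongside it.
                options["auto_tie_breaker"] = "1"
            elif node_count == 2:
                # two_node is set for a plain 2-node cluster and dropped as
                # soon as a third node is added (cluster localnode add).
                options["two_node"] = "1"
            return options
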
+    def test_cluster_setup_3_nodes(self):
+        # Setup a 3 node cluster
+        if utils.is_rhel6():
             return
 
-        output, returnVal = pcs(temp_cib, "cluster") 
-        self.assertTrue(output.startswith("\nUsage: pcs cluster [commands]..."))
-        self.assertEquals(returnVal, 1)
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 rh7-3"
+        )
+        self.assertEqual("", output)
+        self.assertEqual(0, returnVal)
+        with open("corosync.conf.tmp") as f:
+            data = f.read()
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+
+    node {
+        ring0_addr: rh7-3
+        nodeid: 3
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
+
+    def test_cluster_setup_transport(self):
+        # Test to make sure transport is set
+        if utils.is_rhel6():
+            return
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp cname rh7-1 rh7-2"
+            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --transport udp"
         )
-        ac(output, """\
-Error: A cluster name (--name <name>) is required to setup a cluster
+        self.assertEqual("", output)
+        self.assertEqual(0, returnVal)
+        with open("corosync.conf.tmp") as f:
+            data = f.read()
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udp
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
 """)
-        self.assertEquals(returnVal, 1)
 
+    def test_cluster_setup_2_nodes_rhel6(self):
         # Setup a 2 node cluster and make sure the two node config is set, then
         # add a node and make sure that it's unset, then remove a node and make
-        # sure it's set again
+        # sure it's set again.
+        # There is no auto-tie-breaker in CMAN so we don't need the non-atb
+        # variant as we do for corosync.
+        if not utils.is_rhel6():
+            return
+
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2"
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2"
         )
         ac(output, "")
-        self.assertEquals(returnVal, 0)
-
+        self.assertEqual(returnVal, 0)
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="9" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -212,11 +859,10 @@ Error: A cluster name (--name <name>) is required to setup a cluster
             "cluster localnode add --cluster_conf=cluster.conf.tmp rh7-3"
         )
         ac(output, "rh7-3: successfully added!\n")
-        self.assertEquals(returnVal, 0)
-
+        self.assertEqual(returnVal, 0)
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="13" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -258,11 +904,11 @@ Error: A cluster name (--name <name>) is required to setup a cluster
             "cluster localnode remove --cluster_conf=cluster.conf.tmp rh7-3"
         )
         ac(output, "rh7-3: successfully removed!\n")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
 
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="15" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -297,11 +943,11 @@ Error: A cluster name (--name <name>) is required to setup a cluster
             "cluster localnode add --cluster_conf=cluster.conf.tmp rh7-3,192.168.1.3"
         )
         ac(output, "rh7-3,192.168.1.3: successfully added!\n")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
 
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="20" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -344,11 +990,11 @@ Error: A cluster name (--name <name>) is required to setup a cluster
             "cluster localnode remove --cluster_conf=cluster.conf.tmp rh7-2"
         )
         ac(output, "rh7-2: successfully removed!\n")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
 
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="22" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -384,11 +1030,11 @@ Error: A cluster name (--name <name>) is required to setup a cluster
             "cluster localnode remove --cluster_conf=cluster.conf.tmp rh7-3,192.168.1.3"
         )
         ac(output, "rh7-3,192.168.1.3: successfully removed!\n")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
 
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="23" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -411,16 +1057,20 @@ Error: A cluster name (--name <name>) is required to setup a cluster
 </cluster>
 """)
 
+    def test_cluster_setup_3_nodes_rhel6(self):
+        # Setup a 3 node cluster
+        if not utils.is_rhel6():
+            return
+
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf2.tmp --name cname rh7-1 rh7-2 rh7-3"
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 rh7-3"
         )
         ac(output, "")
-        self.assertEquals(returnVal, 0)
-
-        with open("cluster.conf2.tmp") as f:
+        self.assertEqual(returnVal, 0)
+        with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="12" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -457,29 +1107,22 @@ Error: A cluster name (--name <name>) is required to setup a cluster
 </cluster>
 """)
 
+    def test_cluster_setup_transport_rhel6(self):
         # Test to make sure transport is set
-        output, returnVal = pcs(
-            temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --transport udpu"
-        )
-        ac(output, """\
-Warning: Using udpu transport on a CMAN cluster, cluster restart is required after node add or remove
-Error: cluster.conf.tmp already exists, use --force to overwrite
-""")
-        self.assertEquals(returnVal, 1)
+        if not utils.is_rhel6():
+            return
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --transport udpu"
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --transport udpu"
         )
         ac(output, """\
 Warning: Using udpu transport on a CMAN cluster, cluster restart is required after node add or remove
 """)
-        self.assertEquals(returnVal, 0)
-
+        self.assertEqual(returnVal, 0)
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="9" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -509,34 +1152,67 @@ Warning: Using udpu transport on a CMAN cluster, cluster restart is required aft
 </cluster>
 """)
 
-    def testIPV6(self):
+    def test_cluster_setup_ipv6(self):
         if utils.is_rhel6():
             return
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cnam rh7-1 rh7-2 --ipv6")
-        ac(o,"")
-        assert r == 0
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --ipv6"
+        )
+        self.assertEqual("", output)
+        self.assertEqual(0, returnVal)
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cnam\ntransport: udpu\nip_version: ipv6\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\ntwo_node: 1\n}\n\nlogging {\nto_syslog: yes\n}\n')
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+    ip_version: ipv6
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-    def testIPV6Rhel6(self):
+    def test_cluster_setup_ipv6_rhel6(self):
         if not utils.is_rhel6():
             return
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cnam rh7-1 rh7-2 --ipv6"
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --ipv6"
         )
         ac(output, """\
 Warning: --ipv6 ignored as it is not supported on CMAN clusters
 """)
-        self.assertEquals(returnVal, 0)
-
+        self.assertEqual(returnVal, 0)
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
-<cluster config_version="9" name="cnam">
+            ac(data, """\
+<cluster config_version="9" name="cname">
   <fence_daemon/>
   <clusternodes>
     <clusternode name="rh7-1" nodeid="1">
@@ -565,130 +1241,514 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
 </cluster>
 """)
 
-
-    def testRRPConfig(self):
+    def test_cluster_setup_rrp_passive_udp_addr01(self):
         if utils.is_rhel6():
             return
 
-        o,r = pcs("cluster setup --transport udp --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0")
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0")
+        assert r == 1
+        ac(o, "Error: --addr0 can only be used once\n")
+
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp")
+        assert r == 1
+        ac("Error: blah is an unknown RRP mode, use --force to override\n", o)
+
+        o,r = pcs("cluster setup --transport udp --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0")
         ac(o,"")
         assert r == 0
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udp\nrrp_mode: passive\n  interface {\n    ringnumber: 0\n    bindnetaddr: 1.1.1.0\n    mcastaddr: 239.255.1.1\n    mcastport: 5405\n  }\n  interface {\n    ringnumber: 1\n    bindnetaddr: 1.1.2.0\n    mcastaddr: 239.255.2.1\n    mcastport: 5405\n  }\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n [...]
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udp
+    rrp_mode: passive
+
+    interface {
+        ringnumber: 0
+        bindnetaddr: 1.1.1.0
+        mcastaddr: 239.255.1.1
+        mcastport: 5405
+    }
+
+    interface {
+        ringnumber: 1
+        bindnetaddr: 1.1.2.0
+        mcastaddr: 239.255.2.1
+        mcastport: 5405
+    }
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
+
+    def test_cluster_setup_rrp_passive_udp_addr01_mcast01(self):
+        if utils.is_rhel6():
+            return
 
-        o,r = pcs("cluster setup --transport udp --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9")
+        o,r = pcs("cluster setup --transport udp --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9")
         ac(o,"")
         assert r == 0
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udp\nrrp_mode: passive\n  interface {\n    ringnumber: 0\n    bindnetaddr: 1.1.1.0\n    mcastaddr: 8.8.8.8\n    mcastport: 5405\n  }\n  interface {\n    ringnumber: 1\n    bindnetaddr: 1.1.2.0\n    mcastaddr: 9.9.9.9\n    mcastport: 5405\n  }\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n\nquorum [...]
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udp
+    rrp_mode: passive
+
+    interface {
+        ringnumber: 0
+        bindnetaddr: 1.1.1.0
+        mcastaddr: 8.8.8.8
+        mcastport: 5405
+    }
+
+    interface {
+        ringnumber: 1
+        bindnetaddr: 1.1.2.0
+        mcastaddr: 9.9.9.9
+        mcastport: 5405
+    }
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
+
+    def test_cluster_setup_rrp_passive_udp_addr01_mcastport01(self):
+        if utils.is_rhel6():
+            return
 
-        o,r = pcs("cluster setup --transport udp --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0")
+        o,r = pcs("cluster setup --transport udp --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0")
         ac(o,"")
         assert r == 0
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udp\nrrp_mode: passive\n  interface {\n    ringnumber: 0\n    bindnetaddr: 1.1.1.0\n    mcastaddr: 239.255.1.1\n    mcastport: 9999\n  }\n  interface {\n    ringnumber: 1\n    bindnetaddr: 1.1.2.0\n    mcastaddr: 239.255.2.1\n    mcastport: 9998\n  }\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n [...]
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udp
+    rrp_mode: passive
+
+    interface {
+        ringnumber: 0
+        bindnetaddr: 1.1.1.0
+        mcastaddr: 239.255.1.1
+        mcastport: 9999
+    }
+
+    interface {
+        ringnumber: 1
+        bindnetaddr: 1.1.2.0
+        mcastaddr: 239.255.2.1
+        mcastport: 9998
+    }
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
+
+    def test_cluster_setup_rrp_passive_udp_addr01_ttl01(self):
+        if utils.is_rhel6():
+            return
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp")
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp")
         ac(o,"")
         assert r == 0
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udp\nrrp_mode: passive\n  interface {\n    ringnumber: 0\n    bindnetaddr: 1.1.1.0\n    mcastaddr: 239.255.1.1\n    mcastport: 5405\n    ttl: 4\n  }\n  interface {\n    ringnumber: 1\n    bindnetaddr: 1.1.2.0\n    mcastaddr: 239.255.2.1\n    mcastport: 5405\n    ttl: 5\n  }\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n         [...]
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udp
+    rrp_mode: passive
+
+    interface {
+        ringnumber: 0
+        bindnetaddr: 1.1.1.0
+        mcastaddr: 239.255.1.1
+        mcastport: 5405
+        ttl: 4
+    }
+
+    interface {
+        ringnumber: 1
+        bindnetaddr: 1.1.2.0
+        mcastaddr: 239.255.2.1
+        mcastport: 5405
+        ttl: 5
+    }
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
+
+    def test_cluster_setup_rrp_active_udp_addr01(self):
+        if utils.is_rhel6():
+            return
+
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp")
+        ac(o, "Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n")
+        assert r == 1
 
         o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp")
-        ac(o,"")
+        ac(o, "Warning: using a RRP mode of 'active' is not supported or tested\n")
         assert r == 0
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udp\nrrp_mode: active\n  interface {\n    ringnumber: 0\n    bindnetaddr: 1.1.1.0\n    mcastaddr: 239.255.1.1\n    mcastport: 5405\n  }\n  interface {\n    ringnumber: 1\n    bindnetaddr: 1.1.2.0\n    mcastaddr: 239.255.2.1\n    mcastport: 5405\n  }\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n\ [...]
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udp
+    rrp_mode: active
+
+    interface {
+        ringnumber: 0
+        bindnetaddr: 1.1.1.0
+        mcastaddr: 239.255.1.1
+        mcastport: 5405
+    }
+
+    interface {
+        ringnumber: 1
+        bindnetaddr: 1.1.2.0
+        mcastaddr: 239.255.2.1
+        mcastport: 5405
+    }
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp")
-        ac(o,"")
-        assert r == 0
-        with open("corosync.conf.tmp") as f:
-            data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udp\nrrp_mode: active\n  interface {\n    ringnumber: 0\n    bindnetaddr: 1.1.1.0\n    broadcast: yes\n  }\n  interface {\n    ringnumber: 1\n    bindnetaddr: 1.1.2.0\n    mcastaddr: 239.255.2.1\n    mcastport: 5405\n  }\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n\nquorum {\nprovider: corosync [...]
+    def test_cluster_setup_rrp_active_udp_broadcast_addr01(self):
+        if utils.is_rhel6():
+            return
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3")
-        ac(o,"Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2,192.168.99.3\n")
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp")
+        ac(o, "Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n")
         assert r == 1
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2")
-        ac(o,"")
+        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp")
+        ac(o, "Warning: using a RRP mode of 'active' is not supported or tested\n")
         assert r == 0
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: cname\ntransport: udpu\nrrp_mode: passive\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        ring1_addr: 192.168.99.1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        ring1_addr: 192.168.99.2\n        nodeid: 2\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\ntwo_node: 1\n}\n\nlogging {\nto_syslog: yes\n}\n')
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udp
+    rrp_mode: active
+
+    interface {
+        ringnumber: 0
+        bindnetaddr: 1.1.1.0
+        broadcast: yes
+    }
+
+    interface {
+        ringnumber: 1
+        bindnetaddr: 1.1.2.0
+        mcastaddr: 239.255.2.1
+        mcastport: 5405
+    }
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2")
-        ac(o,"Error: if one node is configured for RRP, all nodes must configured for RRP\n")
-        assert r == 1
+    def test_cluster_setup_rrp_udpu(self):
+        if utils.is_rhel6():
+            return
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0")
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3")
+        ac(o,"Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2,192.168.99.3\n")
         assert r == 1
-        ac(o, "Error: --addr0 can only be used once\n")
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname nonexistant-address")
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2")
+        ac(o,"Error: if one node is configured for RRP, all nodes must be configured for RRP\n")
         assert r == 1
-        ac(o,"Error: Unable to resolve all hostnames (use --force to override).\nWarning: Unable to resolve hostname: nonexistant-address\n")
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname nonexistant-address --force")
-        assert r == 0
-        ac(o,"Warning: Unable to resolve hostname: nonexistant-address\n")
+        o,r = pcs("cluster setup --force --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1")
+        ac(o,"Error: --addr0 and --addr1 can only be used with --transport=udp\n")
+        assert r == 1
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --wait_for_all=2 --auto_tie_breaker=3 --last_man_standing=4 --last_man_standing_window=5")
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2")
         ac(o,"")
         assert r == 0
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: test99\ntransport: udpu\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\nwait_for_all: 2\nauto_tie_breaker: 3\nlast_man_standing: 4\nlast_man_standing_window: 5\ntwo_node: 1\n}\n\nlogging {\nto_syslog: yes\n}\n')
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: cname
+    transport: udpu
+    rrp_mode: passive
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        ring1_addr: 192.168.99.1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        ring1_addr: 192.168.99.2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --wait_for_all=1 --auto_tie_breaker=1 --last_man_standing=1 --last_man_standing_window=12000")
-        ac(o,"")
-        assert r == 0
-        with open("corosync.conf.tmp") as f:
-            data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: test99\ntransport: udpu\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\nwait_for_all: 1\nauto_tie_breaker: 1\nlast_man_standing: 1\nlast_man_standing_window: 12000\ntwo_node: 1\n}\n\nlogging {\nto_syslog: yes\n}\n')
+    def test_cluster_setup_quorum_options(self):
+        if utils.is_rhel6():
+            return
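+        # quorum options accept only 0 or 1; invalid values fail even with --force, valid ones end up in the quorum section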
 
-        o,r = pcs("cluster setup --force --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1")
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --wait_for_all=2")
+        ac(o, "Error: '2' is not a valid value for --wait_for_all, use 0 or 1\n")
         assert r == 1
-        ac(o,"Error: --addr0 and --addr1 can only be used with --transport=udp\n")
 
-        os.remove("corosync.conf.tmp")
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp")
+        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --wait_for_all=2")
+        ac(o, "Error: '2' is not a valid value for --wait_for_all, use 0 or 1\n")
         assert r == 1
-        ac("Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n",o)
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp")
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --auto_tie_breaker=2")
+        ac(o, "Error: '2' is not a valid value for --auto_tie_breaker, use 0 or 1\n")
         assert r == 1
-        ac("Error: blah is an unknown RRP mode, use --force to override\n", o)
 
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode passive --broadcast0 --transport udp")
-        assert r == 0
-        ac("", o)
+        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --auto_tie_breaker=2")
+        ac(o, "Error: '2' is not a valid value for --auto_tie_breaker, use 0 or 1\n")
+        assert r == 1
+
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --last_man_standing=2")
+        ac(o, "Error: '2' is not a valid value for --last_man_standing, use 0 or 1\n")
+        assert r == 1
+
+        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --last_man_standing=2")
+        ac(o, "Error: '2' is not a valid value for --last_man_standing, use 0 or 1\n")
+        assert r == 1
 
-        os.remove("corosync.conf.tmp")
-        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --broadcast0 --transport udp")
+        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --wait_for_all=1 --auto_tie_breaker=1 --last_man_standing=1 --last_man_standing_window=12000")
+        ac(o,"")
         assert r == 0
-        ac("", o)
+        with open("corosync.conf.tmp") as f:
+            data = f.read()
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: test99
+    transport: udpu
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    wait_for_all: 1
+    auto_tie_breaker: 1
+    last_man_standing: 1
+    last_man_standing_window: 12000
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-    def testRRPConfigRhel6(self):
+    def test_cluster_setup_rrp_passive_udp_addr01_rhel6(self):
         if not utils.is_rhel6():
             return
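+        # a duplicate --addr0 and an unknown RRP mode are both rejected; CMAN additionally warns about single-ring broadcast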
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --transport udp --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0"
+            "cluster setup --local --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0"
+        )
+        ac(output, "Error: --addr0 can only be used once\n")
+        self.assertEqual(returnVal, 1)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
+        )
+        ac(output, """\
+Error: blah is an unknown RRP mode, use --force to override
+Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
+""")
+        self.assertEqual(returnVal, 1)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --transport udp --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0"
         )
         ac(output, "")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
 
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="14" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -724,16 +1784,20 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
 </cluster>
 """)
 
+    def test_cluster_setup_rrp_passive_udp_addr01_mcast01_rhel6(self):
+        if not utils.is_rhel6():
+            return
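+        # per-ring multicast addresses via --mcast0/--mcast1 on a CMAN cluster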
+
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --transport udp --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
+            "cluster setup --transport udp --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcast0 8.8.8.8 --addr1 1.1.2.0 --mcast1 9.9.9.9"
         )
         ac(output, "")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
 
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="14" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -769,16 +1833,20 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
 </cluster>
 """)
 
+    def test_cluster_setup_rrp_passive_udp_addr01_mcastport01_rhel6(self):
+        if not utils.is_rhel6():
+            return
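+        # per-ring multicast ports via --mcastport0/--mcastport1 on a CMAN cluster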
+
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --transport udp --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
+            "cluster setup --transport udp --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --mcastport0 9999 --mcastport1 9998 --addr1 1.1.2.0"
         )
         ac(output, "")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
 
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="14" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -814,16 +1882,20 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
 </cluster>
 """)
 
+    def test_cluster_setup_rrp_passive_udp_addr01_ttl01_rhel6(self):
+        if not utils.is_rhel6():
+            return
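+        # per-ring TTL values via --ttl0/--ttl1 on a CMAN cluster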
+
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --ttl0 4 --ttl1 5 --transport udp"
         )
         ac(output, "")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
 
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="14" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -859,16 +1931,32 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
 </cluster>
 """)
 
+    def test_cluster_setup_rrp_active_udp_addr01_rhel6(self):
+        if not utils.is_rhel6():
+            return
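+        # active RRP on a CMAN cluster: error without --force, warning with it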
+
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
         )
-        ac(output, "")
-        self.assertEquals(returnVal, 0)
+        ac(
+            output,
+            "Error: using a RRP mode of 'active' is not supported or tested, use --force to override\n"
+        )
+        self.assertEqual(returnVal, 1)
 
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --transport udp"
+        )
+        ac(
+            output,
+            "Warning: using a RRP mode of 'active' is not supported or tested\n"
+        )
+        self.assertEqual(returnVal, 0)
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="14" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -904,18 +1992,32 @@ Warning: --ipv6 ignored as it is not supported on CMAN clusters
 </cluster>
 """)
 
+    def test_cluster_setup_rrp_active_udp_broadcast_addr01_rhel6(self):
+        if not utils.is_rhel6():
+            return
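+        # active RRP plus --broadcast0 on CMAN: the RRP error/warning is combined with the single-ring broadcast warning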
+
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
+        )
+        ac(output, """\
+Error: using a RRP mode of 'active' is not supported or tested, use --force to override
+Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
+""")
+        self.assertEqual(returnVal, 1)
+
         output, returnVal = pcs(
             temp_cib,
             "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
         )
         ac(output, """\
 Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
+Warning: using a RRP mode of 'active' is not supported or tested
 """)
-        self.assertEquals(returnVal, 0)
-
+        self.assertEqual(returnVal, 0)
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="12" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -948,25 +2050,47 @@ Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in onl
 </cluster>
 """)
 
+    def test_cluster_setup_rrp_udpu_rhel6(self):
+        if not utils.is_rhel6():
+            return
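+        # RRP via node,addr pairs on CMAN; --addr0 with udpu is rejected and udpu itself triggers a restart warning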
+
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3"
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2,192.168.99.3"
         )
         ac(output, """\
 Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2,192.168.99.3
 """)
-        self.assertEquals(returnVal, 1)
+        self.assertEqual(returnVal, 1)
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2"
+            "cluster setup --local --name cname rh7-1,192.168.99.1 rh7-2"
         )
-        ac(output, "")
-        self.assertEquals(returnVal, 0)
+        ac(output, """\
+Error: if one node is configured for RRP, all nodes must be configured for RRP
+""")
+        self.assertEqual(returnVal, 1)
 
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --force --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1 --transport=udpu"
+        )
+        ac(output, """\
+Error: --addr0 and --addr1 can only be used with --transport=udp
+Warning: Using udpu transport on a CMAN cluster, cluster restart is required after node add or remove
+""")
+        self.assertEqual(returnVal, 1)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1,192.168.99.1 rh7-2,192.168.99.2"
+        )
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="12" name="cname">
   <fence_daemon/>
   <clusternodes>
@@ -999,43 +2123,76 @@ Error: You cannot specify more than two addresses for a node: rh7-2,192.168.99.2
 </cluster>
 """)
 
-        output, returnVal = pcs(
-            temp_cib,
-            "cluster setup --local --name cname rh7-1,192.168.99.1 rh7-2"
-        )
-        ac(output, """\
-Error: if one node is configured for RRP, all nodes must configured for RRP
-""")
-        self.assertEquals(returnVal, 1)
+    def test_cluster_setup_broadcast_rhel6(self):
+        if not utils.is_rhel6():
+            return
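+        # --broadcast0 enables broadcast on ring 1 as well; with or without an explicit passive rrpmode the generated cluster.conf is identical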
 
-        output, returnVal = pcs(
-            temp_cib,
-            "cluster setup --local --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr0 1.1.2.0"
-        )
-        ac(output, "Error: --addr0 can only be used once\n")
-        self.assertEquals(returnVal, 1)
+        cluster_conf = """\
+<cluster config_version="12" name="cname">
+  <fence_daemon/>
+  <clusternodes>
+    <clusternode name="rh7-1" nodeid="1">
+      <altname name="1.1.2.0"/>
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-1"/>
+        </method>
+      </fence>
+    </clusternode>
+    <clusternode name="rh7-2" nodeid="2">
+      <altname name="1.1.2.0"/>
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-2"/>
+        </method>
+      </fence>
+    </clusternode>
+  </clusternodes>
+  <cman broadcast="yes" expected_votes="1" transport="udpb" two_node="1"/>
+  <fencedevices>
+    <fencedevice agent="fence_pcmk" name="pcmk-redirect"/>
+  </fencedevices>
+  <rm>
+    <failoverdomains/>
+    <resources/>
+  </rm>
+  <totem rrp_mode="passive"/>
+</cluster>
+"""
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --name cname nonexistant-address"
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode passive --broadcast0 --transport udp"
         )
         ac(output, """\
-Error: Unable to resolve all hostnames (use --force to override).\nWarning: Unable to resolve hostname: nonexistant-address
+Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
 """)
-        self.assertEquals(returnVal, 1)
+        self.assertEqual(returnVal, 0)
+        with open("cluster.conf.tmp") as f:
+            data = f.read()
+            ac(data, cluster_conf)
+
+        os.remove("cluster.conf.tmp")
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname nonexistant-address --force"
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --broadcast0 --transport udp"
         )
         ac(output, """\
-Warning: Unable to resolve hostname: nonexistant-address
+Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
 """)
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
+        with open("cluster.conf.tmp") as f:
+            data = f.read()
+            ac(data, cluster_conf)
+
+    def test_cluster_setup_quorum_options_rhel6(self):
+        if not utils.is_rhel6():
+            return
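+        # votequorum tuning options are ignored with warnings on CMAN clusters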
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name test99 rh7-1 rh7-2 --wait_for_all=2 --auto_tie_breaker=3 --last_man_standing=4 --last_man_standing_window=5"
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name test99 rh7-1 rh7-2 --wait_for_all=2 --auto_tie_breaker=3 --last_man_standing=4 --last_man_standing_window=5"
         )
         ac(output, """\
 Warning: --wait_for_all ignored as it is not supported on CMAN clusters
@@ -1043,11 +2200,10 @@ Warning: --auto_tie_breaker ignored as it is not supported on CMAN clusters
 Warning: --last_man_standing ignored as it is not supported on CMAN clusters
 Warning: --last_man_standing_window ignored as it is not supported on CMAN clusters
 """)
-        self.assertEquals(returnVal, 0)
-
+        self.assertEqual(returnVal, 0)
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="9" name="test99">
   <fence_daemon/>
   <clusternodes>
@@ -1077,79 +2233,67 @@ Warning: --last_man_standing_window ignored as it is not supported on CMAN clust
 </cluster>
 """)
 
-        output, returnVal = pcs(
-            temp_cib,
-            "cluster setup --force --local --name test99 rh7-1 rh7-2 --addr0 1.1.1.1 --transport=udpu"
-        )
-        ac(output, """\
-Error: --addr0 and --addr1 can only be used with --transport=udp
-Warning: Using udpu transport on a CMAN cluster, cluster restart is required after node add or remove
-""")
-        self.assertEquals(returnVal, 1)
-
-        os.remove("cluster.conf.tmp")
-        output, returnVal = pcs(
-            temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode active --broadcast0 --transport udp"
-        )
-        ac(output, """\
-Error: using a RRP mode of 'active' is not supported or tested, use --force to override
-""")
-        self.assertEquals(returnVal, 1)
-
-        output, returnVal = pcs(
-            temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode blah --broadcast0 --transport udp"
-        )
-        ac(output, """\
-Error: blah is an unknown RRP mode, use --force to override
-""")
-        self.assertEquals(returnVal, 1)
-
-        output, returnVal = pcs(
-            temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --rrpmode passive --broadcast0 --transport udp"
-        )
-        ac(output, """\
-Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
-""")
-        self.assertEquals(returnVal, 0)
-
-        os.remove("cluster.conf.tmp")
-        output, returnVal = pcs(
-            temp_cib,
-            "cluster setup --local --cluster_conf=cluster.conf.tmp --name cname rh7-1 rh7-2 --addr0 1.1.1.0 --addr1 1.1.2.0 --broadcast0 --transport udp"
-        )
-        ac(output, """\
-Warning: Enabling broadcast for ring 1 as CMAN does not support broadcast in only one ring
-""")
-        self.assertEquals(returnVal, 0)
-
-    def testTotemOptions(self):
+    def test_cluster_setup_totem_options(self):
         if utils.is_rhel6():
             return
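+        # totem tuning options (--token, --join, --consensus, ...) are written into the totem section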
 
-        o,r = pcs("cluster setup --force --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005")
+        o,r = pcs("cluster setup --local --corosync_conf=corosync.conf.tmp --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005")
         ac(o,"")
         assert r == 0
         with open("corosync.conf.tmp") as f:
             data = f.read()
-            ac(data,'totem {\nversion: 2\nsecauth: off\ncluster_name: test99\ntransport: udpu\ntoken: 20000\ntoken_coefficient: 20005\njoin: 20001\nconsensus: 20002\nmiss_count_const: 20003\nfail_recv_const: 20004\n}\n\nnodelist {\n  node {\n        ring0_addr: rh7-1\n        nodeid: 1\n       }\n  node {\n        ring0_addr: rh7-2\n        nodeid: 2\n       }\n}\n\nquorum {\nprovider: corosync_votequorum\ntwo_node: 1\n}\n\nlogging {\nto_syslog: yes\n}\n')
+            ac(data, """\
+totem {
+    version: 2
+    secauth: off
+    cluster_name: test99
+    transport: udpu
+    token: 20000
+    token_coefficient: 20005
+    join: 20001
+    consensus: 20002
+    miss_count_const: 20003
+    fail_recv_const: 20004
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+    two_node: 1
+}
+
+logging {
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+}
+""")
 
-    def testTotemOptionsRhel6(self):
+    def test_cluster_setup_totem_options_rhel6(self):
         if not utils.is_rhel6():
             return
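+        # the same totem options on CMAN; --token_coefficient is ignored with a warning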
 
         output, returnVal = pcs(
             temp_cib,
-            "cluster setup --force --local --cluster_conf=cluster.conf.tmp --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005")
+            "cluster setup --local --cluster_conf=cluster.conf.tmp --name test99 rh7-1 rh7-2 --token 20000 --join 20001 --consensus 20002 --miss_count_const 20003 --fail_recv_const 20004 --token_coefficient 20005")
         ac(output, """\
 Warning: --token_coefficient ignored as it is not supported on CMAN clusters
 """)
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         with open("cluster.conf.tmp") as f:
             data = f.read()
-        ac(data, """\
+            ac(data, """\
 <cluster config_version="10" name="test99">
   <fence_daemon/>
   <clusternodes>
@@ -1183,6 +2327,7 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
     def testUIDGID(self):
         if utils.is_rhel6():
             os.system("cp cluster.conf cluster.conf.tmp")
+
             o,r = pcs("cluster uidgid --cluster_conf=cluster.conf.tmp")
             assert r == 0
             ac(o, "No uidgids configured in cluster.conf\n")
@@ -1305,7 +2450,7 @@ Warning: --token_coefficient ignored as it is not supported on CMAN clusters
 
     def testClusterUpgrade(self):
         if not isMinimumPacemakerVersion(1,1,11):
-            print "WARNING: Unable to test cluster upgrade because pacemaker is older than 1.1.11"
+            print("WARNING: Unable to test cluster upgrade because pacemaker is older than 1.1.11")
             return
         with open(temp_cib) as myfile:
             data = myfile.read()
diff --git a/pcs/test/test_constraints.py b/pcs/test/test_constraints.py
index 4841afd..2c9f963 100644
--- a/pcs/test/test_constraints.py
+++ b/pcs/test/test_constraints.py
@@ -1,12 +1,21 @@
-import os,sys
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import sys
 import shutil
 import unittest
 parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0,parentdir) 
+sys.path.insert(0,parentdir)
+
 import utils
-from pcs_test_functions import pcs,ac,isMinimumPacemakerVersion
+from pcs_test_functions import pcs, ac, isMinimumPacemakerVersion
+
 
 empty_cib = "empty.xml"
+empty_cib_1_2 = "empty-1.2.xml"
 temp_cib = "temp.xml"
 large_cib = "large.xml"
 temp_large_cib = "temp-large.xml"
@@ -215,6 +224,28 @@ Colocation Constraints:
         ac(o,"Location Constraints:\nOrdering Constraints:\n  stop D1 then stop D2 (kind:Mandatory) (id:order-D1-D2-mandatory)\n  start D1 then start D2 (kind:Mandatory) (id:order-D1-D2-mandatory-1)\nColocation Constraints:\n")
         assert r == 0
 
+    def testOrderConstraintRequireAll(self):
+        if not isMinimumPacemakerVersion(1,1,12):
+            print("WARNING: Pacemaker version is too old (must be >= 1.1.12) to test require-all")
+            return
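+        # upgrade the CIB, then create an ordering constraint with require-all=false and check it is listed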
+
+        o,r = pcs("cluster cib-upgrade")
+        ac(o,"Cluster CIB has been upgraded to latest version\n")
+        assert r == 0
+
+        o,r = pcs("constraint order start D1 then start D2 require-all=false")
+        ac(o,"Adding D1 D2 (kind: Mandatory) (Options: require-all=false first-action=start then-action=start)\n")
+        assert r == 0
+
+        o,r = pcs("constraint --full")
+        ac(o, """\
+Location Constraints:
+Ordering Constraints:
+  start D1 then start D2 (kind:Mandatory) (Options: require-all=false) (id:order-D1-D2-mandatory)
+Colocation Constraints:
+""")
+        assert r == 0
+
     def testAllConstraints(self):
         output, returnVal = pcs(temp_cib, "constraint location D5 prefers node1")
         assert returnVal == 0 and output == "", output
@@ -361,6 +392,22 @@ Colocation Constraints:
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
+        o, r = pcs(temp_cib, "constraint colocation set")
+        assert o.startswith("\nUsage: pcs constraint")
+        assert r == 1
+
+        o, r = pcs(temp_cib, "constraint colocation set D7 D8 set")
+        assert o.startswith("\nUsage: pcs constraint")
+        assert r == 1
+
+        o, r = pcs(temp_cib, "constraint colocation set D7 D8 set set D8 D9")
+        assert o.startswith("\nUsage: pcs constraint")
+        assert r == 1
+
+        o, r = pcs(temp_cib, "constraint colocation set setoptions score=100")
+        assert o.startswith("\nUsage: pcs constraint")
+        assert r == 1
+
         o, r = pcs(temp_cib, "constraint colocation set D5 D6 D7 sequential=false require-all=true set D8 D9 sequential=true require-all=false action=start role=Stopped setoptions score=INFINITY ")
         ac(o,"")
         assert r == 0
@@ -377,13 +424,13 @@ Colocation Constraints:
         ac(o, """\
 Colocation Constraints:
   Resource Sets:
-    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) setoptions score=INFINITY (id:pcs_rsc_colocation_D5_D6_D7_set_D8_D9)
-    set D5 D6 (id:pcs_rsc_set_D5_D6) setoptions score=INFINITY (id:pcs_rsc_colocation_D5_D6)
-    set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) setoptions score=INFINITY (id:pcs_rsc_colocation_D5_D6_set_D7_D8_set_D8_D9)
+    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6_D7_set_D8_D9)
+    set D5 D6 (id:pcs_rsc_set_D5_D6) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6)
+    set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6_set_D7_D8_set_D8_D9)
 """)
         assert r == 0
 
-        o, r = pcs(temp_cib, "constraint remove pcs_rsc_colocation_D5_D6")
+        o, r = pcs(temp_cib, "constraint remove pcs_rsc_colocation_set_D5_D6")
         ac(o,"")
         assert r == 0
 
@@ -391,8 +438,8 @@ Colocation Constraints:
         ac(o, """\
 Colocation Constraints:
   Resource Sets:
-    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) setoptions score=INFINITY (id:pcs_rsc_colocation_D5_D6_D7_set_D8_D9)
-    set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) setoptions score=INFINITY (id:pcs_rsc_colocation_D5_D6_set_D7_D8_set_D8_D9)
+    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6_D7_set_D8_D9)
+    set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D5_D6_set_D7_D8_set_D8_D9)
 """)
         assert r == 0
 
@@ -405,52 +452,52 @@ Colocation Constraints:
         assert r == 0
         
         o, r = pcs(temp_cib, "constraint ref D7")
-        ac(o,"Resource: D7\n  pcs_rsc_colocation_D5_D6_D7_set_D8_D9\n  pcs_rsc_colocation_D5_D6_set_D7_D8_set_D8_D9\n")
+        ac(o,"Resource: D7\n  pcs_rsc_colocation_set_D5_D6_D7_set_D8_D9\n  pcs_rsc_colocation_set_D5_D6_set_D7_D8_set_D8_D9\n")
         assert r == 0
         
         o, r = pcs(temp_cib, "constraint ref D8")
-        ac(o,"Resource: D8\n  pcs_rsc_colocation_D5_D6_D7_set_D8_D9\n  pcs_rsc_colocation_D5_D6_set_D7_D8_set_D8_D9\n")
+        ac(o,"Resource: D8\n  pcs_rsc_colocation_set_D5_D6_D7_set_D8_D9\n  pcs_rsc_colocation_set_D5_D6_set_D7_D8_set_D8_D9\n")
         assert r == 0
         
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 sequential=foo")
         ac(output, "Error: invalid value 'foo' of option 'sequential', allowed values are: true, false\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 require-all=foo")
         ac(output, "Error: invalid value 'foo' of option 'require-all', allowed values are: true, false\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 role=foo")
         ac(output, "Error: invalid value 'foo' of option 'role', allowed values are: Stopped, Started, Master, Slave\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 action=foo")
         ac(output, "Error: invalid value 'foo' of option 'action', allowed values are: start, promote, demote, stop\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 foo=bar")
         ac(output, "Error: invalid option 'foo', allowed options are: action, role, sequential, require-all\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 setoptions foo=bar")
         ac(output, "Error: invalid option 'foo', allowed options are: score, score-attribute, score-attribute-mangle, id\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 setoptions score=foo")
         ac(output, "Error: invalid score 'foo', use integer or INFINITY or -INFINITY\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 setoptions score=100 score-attribute=foo")
         ac(output, "Error: you cannot specify multiple score options\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint colocation set D1 D2 setoptions score-attribute=foo")
         ac(output, "")
-        self.assertEquals(0, retValue)
+        self.assertEqual(0, retValue)
 
     def testConstraintResourceDiscovery(self):
         if not isMinimumPacemakerVersion(1,1,12):
-            print "WARNING: Pacemaker version is too old (must be >= 1.1.12) to test resource-discovery"
+            print("WARNING: Pacemaker version is too old (must be >= 1.1.12) to test resource-discovery")
             return
 
         o,r = pcs("resource create crd Dummy")
@@ -585,6 +632,22 @@ Colocation Constraints:
         output, returnVal = pcs(temp_cib, line)
         assert returnVal == 0 and output == ""
 
+        o, r = pcs(temp_cib, "constraint order set")
+        assert o.startswith("\nUsage: pcs constraint")
+        assert r == 1
+
+        o, r = pcs(temp_cib, "constraint order set D7 D8 set")
+        assert o.startswith("\nUsage: pcs constraint")
+        assert r == 1
+
+        o, r = pcs(temp_cib, "constraint order set D7 D8 set set D8 D9")
+        assert o.startswith("\nUsage: pcs constraint")
+        assert r == 1
+
+        o, r = pcs(temp_cib, "constraint order set setoptions score=100")
+        assert o.startswith("\nUsage: pcs constraint")
+        assert r == 1
+
         o, r = pcs(temp_cib, "constraint order set D5 D6 D7 sequential=false require-all=true set D8 D9 sequential=true require-all=false action=start role=Stopped")
         ac(o,"")
         assert r == 0
@@ -602,12 +665,12 @@ Colocation Constraints:
         ac(o,"""\
 Ordering Constraints:
   Resource Sets:
-    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_D5_D6_D7_set_D8_D9)
-    set D5 D6 (id:pcs_rsc_set_D5_D6) (id:pcs_rsc_order_D5_D6)
-    set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) (id:pcs_rsc_order_D5_D6_set_D7_D8_set_D8_D9)
+    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_set_D5_D6_D7_set_D8_D9)
+    set D5 D6 (id:pcs_rsc_set_D5_D6) (id:pcs_rsc_order_set_D5_D6)
+    set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) (id:pcs_rsc_order_set_D5_D6_set_D7_D8_set_D8_D9)
 """)
 
-        o, r = pcs(temp_cib, "constraint remove pcs_rsc_order_D5_D6")
+        o, r = pcs(temp_cib, "constraint remove pcs_rsc_order_set_D5_D6")
         assert r == 0
         ac(o,"")
 
@@ -616,8 +679,8 @@ Ordering Constraints:
         ac(o,"""\
 Ordering Constraints:
   Resource Sets:
-    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_D5_D6_D7_set_D8_D9)
-    set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) (id:pcs_rsc_order_D5_D6_set_D7_D8_set_D8_D9)
+    set D5 D6 D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_set_D5_D6_D7_set_D8_D9)
+    set D5 D6 action=stop role=Started (id:pcs_rsc_set_D5_D6-1) set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) (id:pcs_rsc_order_set_D5_D6_set_D7_D8_set_D8_D9)
 """)
         
         o, r = pcs(temp_cib, "resource delete D5")
@@ -630,23 +693,23 @@ Ordering Constraints:
 
         output, retValue = pcs(temp_cib, "constraint order set D1 D2 sequential=foo")
         ac(output, "Error: invalid value 'foo' of option 'sequential', allowed values are: true, false\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint order set D1 D2 require-all=foo")
         ac(output, "Error: invalid value 'foo' of option 'require-all', allowed values are: true, false\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint order set D1 D2 role=foo")
         ac(output, "Error: invalid value 'foo' of option 'role', allowed values are: Stopped, Started, Master, Slave\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint order set D1 D2 action=foo")
         ac(output, "Error: invalid value 'foo' of option 'action', allowed values are: start, promote, demote, stop\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(temp_cib, "constraint order set D1 D2 foo=bar")
         ac(output, "Error: invalid option 'foo', allowed options are: action, role, sequential, require-all\n")
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(
             temp_cib,
@@ -655,7 +718,7 @@ Ordering Constraints:
         ac(output, """\
 Error: invalid option 'foo', allowed options are: kind, symmetrical, id
 """)
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(
             temp_cib,
@@ -664,7 +727,7 @@ Error: invalid option 'foo', allowed options are: kind, symmetrical, id
         ac(output, """\
 Error: invalid kind value 'foo', allowed values are: Optional, Mandatory, Serialize
 """)
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(
             temp_cib,
@@ -673,26 +736,26 @@ Error: invalid kind value 'foo', allowed values are: Optional, Mandatory, Serial
         ac(output, """\
 Error: invalid symmetrical value 'foo', allowed values are: true, false
 """)
-        self.assertEquals(1, retValue)
+        self.assertEqual(1, retValue)
 
         output, retValue = pcs(
             temp_cib,
             "constraint order set D1 D2 setoptions symmetrical=false kind=mandatory"
         )
         ac(output, "")
-        self.assertEquals(0, retValue)
+        self.assertEqual(0, retValue)
 
         output, retValue = pcs(temp_cib, "constraint --full")
         ac(output, """\
 Location Constraints:
 Ordering Constraints:
   Resource Sets:
-    set D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_D5_D6_D7_set_D8_D9)
-    set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) (id:pcs_rsc_order_D5_D6_set_D7_D8_set_D8_D9)
-    set D1 D2 (id:pcs_rsc_set_D1_D2) setoptions symmetrical=false kind=Mandatory (id:pcs_rsc_order_D1_D2)
+    set D7 sequential=false require-all=true (id:pcs_rsc_set_D5_D6_D7) set D8 D9 action=start role=Stopped sequential=true require-all=false (id:pcs_rsc_set_D8_D9) (id:pcs_rsc_order_set_D5_D6_D7_set_D8_D9)
+    set D7 D8 action=promote role=Slave (id:pcs_rsc_set_D7_D8) set D8 D9 action=demote role=Master (id:pcs_rsc_set_D8_D9-1) (id:pcs_rsc_order_set_D5_D6_set_D7_D8_set_D8_D9)
+    set D1 D2 (id:pcs_rsc_set_D1_D2) setoptions symmetrical=false kind=Mandatory (id:pcs_rsc_order_set_D1_D2)
 Colocation Constraints:
 """)
-        self.assertEquals(0, retValue)
+        self.assertEqual(0, retValue)
 
     def testLocationConstraintRule(self):
         o, r = pcs(temp_cib, "constraint location D1 prefers rh7-1")
@@ -850,13 +913,15 @@ Colocation Constraints:
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource create stateful1 stateful --master")
+        o,r = pcs("resource create stateful1 ocf:pacemaker:Stateful --master")
         ac(o, """\
 Warning: changing a monitor operation interval from 10 to 11 to make the operation unique
 """)
         assert r == 0
 
-        o,r = pcs("resource create stateful2 stateful --group statefulG")
+        o,r = pcs(
+            "resource create stateful2 ocf:pacemaker:Stateful --group statefulG"
+        )
         ac(o, """\
 Warning: changing a monitor operation interval from 10 to 11 to make the operation unique
 """)
@@ -954,58 +1019,62 @@ Location Constraints:
 Ordering Constraints:
   start stateful1 then start dummy1 (kind:Mandatory) (id:order-stateful1-dummy1-mandatory)
   Resource Sets:
-    set stateful1 dummy1 (id:pcs_rsc_set_stateful1_dummy1) (id:pcs_rsc_order_stateful1_dummy1)
+    set stateful1 dummy1 (id:pcs_rsc_set_stateful1_dummy1) (id:pcs_rsc_order_set_stateful1_dummy1)
 Colocation Constraints:
   stateful1 with dummy1 (score:INFINITY) (id:colocation-stateful1-dummy1-INFINITY)
   Resource Sets:
-    set stateful1 dummy1 (id:pcs_rsc_set_stateful1_dummy1-1) setoptions score=INFINITY (id:pcs_rsc_colocation_stateful1_dummy1)
+    set stateful1 dummy1 (id:pcs_rsc_set_stateful1_dummy1-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_stateful1_dummy1)
 """)
         assert r == 0
 
     def testMasterSlaveConstraintAutocorrect(self):
         output, returnVal = pcs("resource create dummy1 dummy")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
-        output, returnVal = pcs("resource create stateful1 stateful --master")
+        output, returnVal = pcs(
+            "resource create stateful1 ocf:pacemaker:Stateful --master"
+        )
         ac(output, """\
 Warning: changing a monitor operation interval from 10 to 11 to make the operation unique
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
-        output, returnVal = pcs("resource create stateful2 stateful --group statefulG")
+        output, returnVal = pcs(
+            "resource create stateful2 ocf:pacemaker:Stateful --group statefulG"
+        )
         ac(output, """\
 Warning: changing a monitor operation interval from 10 to 11 to make the operation unique
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("resource master statefulG")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location stateful1 prefers rh7-1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location statefulG prefers rh7-1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location stateful1 rule #uname eq rh7-1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location statefulG rule #uname eq rh7-1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order stateful1 then dummy1 --autocorrect"
@@ -1013,7 +1082,7 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         ac(output, """\
 Adding stateful1-master dummy1 (kind: Mandatory) (Options: first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order dummy1 then statefulG --autocorrect"
@@ -1021,43 +1090,43 @@ Adding stateful1-master dummy1 (kind: Mandatory) (Options: first-action=start th
         ac(output, """\
 Adding dummy1 statefulG-master (kind: Mandatory) (Options: first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order set stateful1 dummy1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order set dummy1 statefulG --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add stateful1 with dummy1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add dummy1 with statefulG --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation set dummy1 stateful1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation set statefulG dummy1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint --full")
         ac(output, """\
@@ -1076,16 +1145,16 @@ Ordering Constraints:
   start stateful1-master then start dummy1 (kind:Mandatory) (id:order-stateful1-master-dummy1-mandatory)
   start dummy1 then start statefulG-master (kind:Mandatory) (id:order-dummy1-statefulG-master-mandatory)
   Resource Sets:
-    set stateful1-master dummy1 (id:pcs_rsc_set_stateful1-master_dummy1) (id:pcs_rsc_order_stateful1_dummy1)
-    set dummy1 statefulG-master (id:pcs_rsc_set_dummy1_statefulG-master) (id:pcs_rsc_order_dummy1_statefulG)
+    set stateful1-master dummy1 (id:pcs_rsc_set_stateful1-master_dummy1) (id:pcs_rsc_order_set_stateful1_dummy1)
+    set dummy1 statefulG-master (id:pcs_rsc_set_dummy1_statefulG-master) (id:pcs_rsc_order_set_dummy1_statefulG)
 Colocation Constraints:
   stateful1-master with dummy1 (score:INFINITY) (id:colocation-stateful1-master-dummy1-INFINITY)
   dummy1 with statefulG-master (score:INFINITY) (id:colocation-dummy1-statefulG-master-INFINITY)
   Resource Sets:
-    set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master) setoptions score=INFINITY (id:pcs_rsc_colocation_dummy1_stateful1)
-    set statefulG-master dummy1 (id:pcs_rsc_set_statefulG-master_dummy1) setoptions score=INFINITY (id:pcs_rsc_colocation_statefulG_dummy1)
+    set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_stateful1)
+    set statefulG-master dummy1 (id:pcs_rsc_set_statefulG-master_dummy1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_statefulG_dummy1)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location stateful1 rule #uname eq rh7-1 --autocorrect"
@@ -1096,13 +1165,13 @@ Error: duplicate constraint already exists, use --force to override
     Rule: score=INFINITY  (id:location-stateful1-master-rule)
       Expression: #uname eq rh7-1  (id:location-stateful1-master-rule-expr)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint location stateful1 rule #uname eq rh7-1 --autocorrect --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order stateful1 then dummy1 --autocorrect"
@@ -1111,7 +1180,7 @@ Error: duplicate constraint already exists, use --force to override
 Error: duplicate constraint already exists, use --force to override
   start stateful1-master then start dummy1 (kind:Mandatory) (id:order-stateful1-master-dummy1-mandatory)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint order stateful1 then dummy1 --autocorrect --force"
@@ -1119,22 +1188,22 @@ Error: duplicate constraint already exists, use --force to override
         ac(output, """\
 Adding stateful1-master dummy1 (kind: Mandatory) (Options: first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order set stateful1 dummy1 --autocorrect"
         )
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
-  set stateful1-master dummy1 (id:pcs_rsc_set_stateful1-master_dummy1) (id:pcs_rsc_order_stateful1_dummy1)
+  set stateful1-master dummy1 (id:pcs_rsc_set_stateful1-master_dummy1) (id:pcs_rsc_order_set_stateful1_dummy1)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint order set stateful1 dummy1 --autocorrect --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add stateful1 with dummy1 --autocorrect"
@@ -1143,28 +1212,28 @@ Error: duplicate constraint already exists, use --force to override
 Error: duplicate constraint already exists, use --force to override
   stateful1-master with dummy1 (score:INFINITY) (id:colocation-stateful1-master-dummy1-INFINITY)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add stateful1 with dummy1 --autocorrect --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation set dummy1 stateful1 --autocorrect"
         )
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
-  set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master) setoptions score=INFINITY (id:pcs_rsc_colocation_dummy1_stateful1)
+  set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_stateful1)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation set dummy1 stateful1 --autocorrect --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint --full")
         ac(output, """\
@@ -1187,19 +1256,19 @@ Ordering Constraints:
   start dummy1 then start statefulG-master (kind:Mandatory) (id:order-dummy1-statefulG-master-mandatory)
   start stateful1-master then start dummy1 (kind:Mandatory) (id:order-stateful1-master-dummy1-mandatory-1)
   Resource Sets:
-    set stateful1-master dummy1 (id:pcs_rsc_set_stateful1-master_dummy1) (id:pcs_rsc_order_stateful1_dummy1)
-    set dummy1 statefulG-master (id:pcs_rsc_set_dummy1_statefulG-master) (id:pcs_rsc_order_dummy1_statefulG)
-    set stateful1-master dummy1 (id:pcs_rsc_set_stateful1-master_dummy1-1) (id:pcs_rsc_order_stateful1_dummy1-1)
+    set stateful1-master dummy1 (id:pcs_rsc_set_stateful1-master_dummy1) (id:pcs_rsc_order_set_stateful1_dummy1)
+    set dummy1 statefulG-master (id:pcs_rsc_set_dummy1_statefulG-master) (id:pcs_rsc_order_set_dummy1_statefulG)
+    set stateful1-master dummy1 (id:pcs_rsc_set_stateful1-master_dummy1-1) (id:pcs_rsc_order_set_stateful1_dummy1-1)
 Colocation Constraints:
   stateful1-master with dummy1 (score:INFINITY) (id:colocation-stateful1-master-dummy1-INFINITY)
   dummy1 with statefulG-master (score:INFINITY) (id:colocation-dummy1-statefulG-master-INFINITY)
   stateful1-master with dummy1 (score:INFINITY) (id:colocation-stateful1-master-dummy1-INFINITY-1)
   Resource Sets:
-    set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master) setoptions score=INFINITY (id:pcs_rsc_colocation_dummy1_stateful1)
-    set statefulG-master dummy1 (id:pcs_rsc_set_statefulG-master_dummy1) setoptions score=INFINITY (id:pcs_rsc_colocation_statefulG_dummy1)
-    set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master-1) setoptions score=INFINITY (id:pcs_rsc_colocation_dummy1_stateful1-1)
+    set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_stateful1)
+    set statefulG-master dummy1 (id:pcs_rsc_set_statefulG-master_dummy1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_statefulG_dummy1)
+    set dummy1 stateful1-master (id:pcs_rsc_set_dummy1_stateful1-master-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_stateful1-1)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
     def testCloneConstraint(self):
         os.system("CIB_file="+temp_cib+" cibadmin -R --scope nodes --xml-text '<nodes><node id=\"1\" uname=\"rh7-1\"/><node id=\"2\" uname=\"rh7-2\"/></nodes>'")
@@ -1308,54 +1377,54 @@ Location Constraints:
 Ordering Constraints:
   start dummy then start dummy1 (kind:Mandatory) (id:order-dummy-dummy1-mandatory)
   Resource Sets:
-    set dummy1 dummy (id:pcs_rsc_set_dummy1_dummy) (id:pcs_rsc_order_dummy1_dummy)
+    set dummy1 dummy (id:pcs_rsc_set_dummy1_dummy) (id:pcs_rsc_order_set_dummy1_dummy)
 Colocation Constraints:
   dummy with dummy1 (score:INFINITY) (id:colocation-dummy-dummy1-INFINITY)
   Resource Sets:
-    set dummy1 dummy (id:pcs_rsc_set_dummy1_dummy-1) setoptions score=INFINITY (id:pcs_rsc_colocation_dummy1_dummy)
+    set dummy1 dummy (id:pcs_rsc_set_dummy1_dummy-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummy)
 """)
         assert r == 0
 
     def testCloneConstraintAutocorrect(self):
         output, returnVal = pcs("resource create dummy1 dummy")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("resource create dummy Dummy --clone")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("resource create dummy2 Dummy --group dummyG")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("resource clone dummyG")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location dummy prefers rh7-1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location dummyG prefers rh7-1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location dummy rule #uname eq rh7-1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location dummyG rule #uname eq rh7-1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order dummy then dummy1 --autocorrect"
@@ -1363,7 +1432,7 @@ Colocation Constraints:
         ac(output, """\
 Adding dummy-clone dummy1 (kind: Mandatory) (Options: first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order dummy1 then dummyG --autocorrect"
@@ -1371,43 +1440,43 @@ Adding dummy-clone dummy1 (kind: Mandatory) (Options: first-action=start then-ac
         ac(output, """\
 Adding dummy1 dummyG-clone (kind: Mandatory) (Options: first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order set dummy1 dummy --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order set dummyG dummy1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add dummy with dummy1 --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add dummy1 with dummyG --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation set dummy1 dummy --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation set dummy1 dummyG --autocorrect"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint --full")
         ac(output, """\
@@ -1426,16 +1495,16 @@ Ordering Constraints:
   start dummy-clone then start dummy1 (kind:Mandatory) (id:order-dummy-clone-dummy1-mandatory)
   start dummy1 then start dummyG-clone (kind:Mandatory) (id:order-dummy1-dummyG-clone-mandatory)
   Resource Sets:
-    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone) (id:pcs_rsc_order_dummy1_dummy)
-    set dummyG-clone dummy1 (id:pcs_rsc_set_dummyG-clone_dummy1) (id:pcs_rsc_order_dummyG_dummy1)
+    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone) (id:pcs_rsc_order_set_dummy1_dummy)
+    set dummyG-clone dummy1 (id:pcs_rsc_set_dummyG-clone_dummy1) (id:pcs_rsc_order_set_dummyG_dummy1)
 Colocation Constraints:
   dummy-clone with dummy1 (score:INFINITY) (id:colocation-dummy-clone-dummy1-INFINITY)
   dummy1 with dummyG-clone (score:INFINITY) (id:colocation-dummy1-dummyG-clone-INFINITY)
   Resource Sets:
-    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-1) setoptions score=INFINITY (id:pcs_rsc_colocation_dummy1_dummy)
-    set dummy1 dummyG-clone (id:pcs_rsc_set_dummy1_dummyG-clone) setoptions score=INFINITY (id:pcs_rsc_colocation_dummy1_dummyG)
+    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummy)
+    set dummy1 dummyG-clone (id:pcs_rsc_set_dummy1_dummyG-clone) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummyG)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location dummy rule #uname eq rh7-1 --autocorrect"
@@ -1446,13 +1515,13 @@ Error: duplicate constraint already exists, use --force to override
     Rule: score=INFINITY  (id:location-dummy-clone-rule)
       Expression: #uname eq rh7-1  (id:location-dummy-clone-rule-expr)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint location dummy rule #uname eq rh7-1 --autocorrect --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order dummy then dummy1 --autocorrect"
@@ -1461,7 +1530,7 @@ Error: duplicate constraint already exists, use --force to override
 Error: duplicate constraint already exists, use --force to override
   start dummy-clone then start dummy1 (kind:Mandatory) (id:order-dummy-clone-dummy1-mandatory)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint order dummy then dummy1 --autocorrect --force"
@@ -1469,22 +1538,22 @@ Error: duplicate constraint already exists, use --force to override
         ac(output, """\
 Adding dummy-clone dummy1 (kind: Mandatory) (Options: first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint order set dummy1 dummy --autocorrect"
         )
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
-  set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone) (id:pcs_rsc_order_dummy1_dummy)
+  set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone) (id:pcs_rsc_order_set_dummy1_dummy)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint order set dummy1 dummy --autocorrect --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add dummy with dummy1 --autocorrect"
@@ -1493,28 +1562,28 @@ Error: duplicate constraint already exists, use --force to override
 Error: duplicate constraint already exists, use --force to override
   dummy-clone with dummy1 (score:INFINITY) (id:colocation-dummy-clone-dummy1-INFINITY)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add dummy with dummy1 --autocorrect --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation set dummy1 dummy --autocorrect"
         )
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
-  set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-1) setoptions score=INFINITY (id:pcs_rsc_colocation_dummy1_dummy)
+  set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummy)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation set dummy1 dummy --autocorrect --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint --full")
         ac(output, """\
@@ -1537,23 +1606,23 @@ Ordering Constraints:
   start dummy1 then start dummyG-clone (kind:Mandatory) (id:order-dummy1-dummyG-clone-mandatory)
   start dummy-clone then start dummy1 (kind:Mandatory) (id:order-dummy-clone-dummy1-mandatory-1)
   Resource Sets:
-    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone) (id:pcs_rsc_order_dummy1_dummy)
-    set dummyG-clone dummy1 (id:pcs_rsc_set_dummyG-clone_dummy1) (id:pcs_rsc_order_dummyG_dummy1)
-    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-2) (id:pcs_rsc_order_dummy1_dummy-1)
+    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone) (id:pcs_rsc_order_set_dummy1_dummy)
+    set dummyG-clone dummy1 (id:pcs_rsc_set_dummyG-clone_dummy1) (id:pcs_rsc_order_set_dummyG_dummy1)
+    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-2) (id:pcs_rsc_order_set_dummy1_dummy-1)
 Colocation Constraints:
   dummy-clone with dummy1 (score:INFINITY) (id:colocation-dummy-clone-dummy1-INFINITY)
   dummy1 with dummyG-clone (score:INFINITY) (id:colocation-dummy1-dummyG-clone-INFINITY)
   dummy-clone with dummy1 (score:INFINITY) (id:colocation-dummy-clone-dummy1-INFINITY-1)
   Resource Sets:
-    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-1) setoptions score=INFINITY (id:pcs_rsc_colocation_dummy1_dummy)
-    set dummy1 dummyG-clone (id:pcs_rsc_set_dummy1_dummyG-clone) setoptions score=INFINITY (id:pcs_rsc_colocation_dummy1_dummyG)
-    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-3) setoptions score=INFINITY (id:pcs_rsc_colocation_dummy1_dummy-1)
+    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummy)
+    set dummy1 dummyG-clone (id:pcs_rsc_set_dummy1_dummyG-clone) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummyG)
+    set dummy1 dummy-clone (id:pcs_rsc_set_dummy1_dummy-clone-3) setoptions score=INFINITY (id:pcs_rsc_colocation_set_dummy1_dummy-1)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
     def testMissingRole(self):
         os.system("CIB_file="+temp_cib+" cibadmin -R --scope nodes --xml-text '<nodes><node id=\"1\" uname=\"rh7-1\"/><node id=\"2\" uname=\"rh7-2\"/></nodes>'")
-        o,r = pcs("resource create stateful0 Stateful --master")
+        o,r = pcs("resource create stateful0 ocf:pacemaker:Stateful --master")
         os.system("CIB_file="+temp_cib+" cibadmin -R --scope constraints --xml-text '<constraints><rsc_location id=\"cli-prefer-stateful0-master\" role=\"Master\" rsc=\"stateful0-master\" node=\"rh7-1\" score=\"INFINITY\"/><rsc_location id=\"cli-ban-stateful0-master-on-rh7-1\" rsc=\"stateful0-master\" role=\"Slave\" node=\"rh7-1\" score=\"-INFINITY\"/></constraints>'")
 
         o,r = pcs("constraint")
@@ -1768,36 +1837,38 @@ Colocation Constraints:
         assert returnVal == 0
 
     def testRemoteNodeConstraintsRemove(self):
+        # constraints referencing the remote node's name,
+        # deleting the remote node resource
         output, returnVal = pcs(
             temp_cib,
             'resource create vm-guest1 VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib, "constraint location D1 prefers node1=100"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib, "constraint location D1 prefers guest1=200"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib, "constraint location D2 avoids node2=300"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib, "constraint location D2 avoids guest1=400"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1811,7 +1882,7 @@ Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource delete vm-guest1")
         ac(output, """\
@@ -1819,7 +1890,7 @@ Removing Constraint - location-D1-guest1-200
 Removing Constraint - location-D2-guest1--400
 Deleting Resource - vm-guest1
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1831,26 +1902,28 @@ Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
+        # constraints referencing the remote node's name,
+        # removing the remote node
         output, returnVal = pcs(
             temp_cib,
             'resource create vm-guest1 VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib, "constraint location D1 prefers guest1=200"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib, "constraint location D2 avoids guest1=400"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1864,13 +1937,13 @@ Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib, "cluster remote-node remove guest1"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1882,27 +1955,55 @@ Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource delete vm-guest1")
+        ac(output, """\
+Deleting Resource - vm-guest1
+""")
+        self.assertEqual(0, returnVal)
+
+        # constraints referencing the remote node resource
+        # deleting the remote node resource
+        output, returnVal = pcs(
+            temp_cib,
+            'resource create vm-guest1 VirtualDomain hypervisor="qemu:///system" config="/root/guest1.xml" meta remote-node=guest1'
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "constraint location vm-guest1 prefers node1"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource delete vm-guest1")
+        ac(output, """\
+Removing Constraint - location-vm-guest1-node1-INFINITY
+Deleting Resource - vm-guest1
+""")
+        self.assertEqual(0, returnVal)
 
     def testDuplicateOrder(self):
         output, returnVal = pcs("constraint order D1 then D2")
         ac(output, """\
 Adding D1 D2 (kind: Mandatory) (Options: first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint order D1 then D2")
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
   start D1 then start D2 (kind:Mandatory) (id:order-D1-D2-mandatory)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs("constraint order D1 then D2 --force")
         ac(output, """\
 Adding D1 D2 (kind: Mandatory) (Options: first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint order start D1 then start D2")
         ac(output, """\
@@ -1910,7 +2011,7 @@ Error: duplicate constraint already exists, use --force to override
   start D1 then start D2 (kind:Mandatory) (id:order-D1-D2-mandatory)
   start D1 then start D2 (kind:Mandatory) (id:order-D1-D2-mandatory-1)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint order start D1 then start D2 --force"
@@ -1918,20 +2019,20 @@ Error: duplicate constraint already exists, use --force to override
         ac(output, """\
 Adding D1 D2 (kind: Mandatory) (Options: first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint order start D2 then start D5")
         ac(output, """\
 Adding D2 D5 (kind: Mandatory) (Options: first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint order start D2 then start D5")
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
   start D2 then start D5 (kind:Mandatory) (id:order-D2-D5-mandatory)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint order start D2 then start D5 --force"
@@ -1939,26 +2040,26 @@ Error: duplicate constraint already exists, use --force to override
         ac(output, """\
 Adding D2 D5 (kind: Mandatory) (Options: first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint order stop D5 then stop D6")
         ac(output, """\
 Adding D5 D6 (kind: Mandatory) (Options: first-action=stop then-action=stop)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint order stop D5 then stop D6")
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
   stop D5 then stop D6 (kind:Mandatory) (id:order-D5-D6-mandatory)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs("constraint order stop D5 then stop D6 --force")
         ac(output, """\
 Adding D5 D6 (kind: Mandatory) (Options: first-action=stop then-action=stop)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1973,32 +2074,32 @@ Ordering Constraints:
   stop D5 then stop D6 (kind:Mandatory) (id:order-D5-D6-mandatory-1)
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
     def testDuplicateColocation(self):
         output, returnVal = pcs("constraint colocation add D1 with D2")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint colocation add D1 with D2")
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
   D1 with D2 (score:INFINITY) (id:colocation-D1-D2-INFINITY)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs("constraint colocation add D1 with D2 50")
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
   D1 with D2 (score:INFINITY) (id:colocation-D1-D2-INFINITY)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add D1 with D2 50 --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add started D1 with started D2"
@@ -2008,25 +2109,25 @@ Error: duplicate constraint already exists, use --force to override
   D1 with D2 (score:INFINITY) (id:colocation-D1-D2-INFINITY)
   D1 with D2 (score:50) (id:colocation-D1-D2-50)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add started D1 with started D2 --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add started D2 with started D5"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add stopped D2 with stopped D5"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add stopped D2 with stopped D5"
@@ -2035,13 +2136,13 @@ Error: duplicate constraint already exists, use --force to override
 Error: duplicate constraint already exists, use --force to override
   D2 with D5 (score:INFINITY) (rsc-role:Stopped) (with-rsc-role:Stopped) (id:colocation-D2-D5-INFINITY-1)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation add stopped D2 with stopped D5 --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -2059,75 +2160,75 @@ Colocation Constraints:
     def testDuplicateSetConstraints(self):
         output, returnVal = pcs("constraint order set D1 D2")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint order set D1 D2")
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
-  set D1 D2 (id:pcs_rsc_set_D1_D2) (id:pcs_rsc_order_D1_D2)
+  set D1 D2 (id:pcs_rsc_set_D1_D2) (id:pcs_rsc_order_set_D1_D2)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs("constraint order set D1 D2 --force")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint order set D1 D2 set D5 D6")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint order set D1 D2 set D5 D6")
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
-  set D1 D2 (id:pcs_rsc_set_D1_D2-2) set D5 D6 (id:pcs_rsc_set_D5_D6) (id:pcs_rsc_order_D1_D2_set_D5_D6)
+  set D1 D2 (id:pcs_rsc_set_D1_D2-2) set D5 D6 (id:pcs_rsc_set_D5_D6) (id:pcs_rsc_order_set_D1_D2_set_D5_D6)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs("constraint order set D1 D2 set D5 D6 --force")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
 
         output, returnVal = pcs("constraint colocation set D1 D2")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint colocation set D1 D2")
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
-  set D1 D2 (id:pcs_rsc_set_D1_D2-4) setoptions score=INFINITY (id:pcs_rsc_colocation_D1_D2)
+  set D1 D2 (id:pcs_rsc_set_D1_D2-4) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D1_D2)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs("constraint colocation set D1 D2 --force")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint colocation set D1 D2 set D5 D6")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint colocation set D1 D2 set D5 D6")
         ac(output, """\
 Error: duplicate constraint already exists, use --force to override
-  set D1 D2 (id:pcs_rsc_set_D1_D2-6) set D5 D6 (id:pcs_rsc_set_D5_D6-2) setoptions score=INFINITY (id:pcs_rsc_colocation_D1_D2_set_D5_D6)
+  set D1 D2 (id:pcs_rsc_set_D1_D2-6) set D5 D6 (id:pcs_rsc_set_D5_D6-2) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D1_D2_set_D5_D6)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint colocation set D1 D2 set D5 D6 --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
 
         output, returnVal = pcs("constraint colocation set D6 D1")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint order set D6 D1")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
 
         output, returnVal = pcs(temp_cib, "constraint --full")
@@ -2135,24 +2236,24 @@ Error: duplicate constraint already exists, use --force to override
 Location Constraints:
 Ordering Constraints:
   Resource Sets:
-    set D1 D2 (id:pcs_rsc_set_D1_D2) (id:pcs_rsc_order_D1_D2)
-    set D1 D2 (id:pcs_rsc_set_D1_D2-1) (id:pcs_rsc_order_D1_D2-1)
-    set D1 D2 (id:pcs_rsc_set_D1_D2-2) set D5 D6 (id:pcs_rsc_set_D5_D6) (id:pcs_rsc_order_D1_D2_set_D5_D6)
-    set D1 D2 (id:pcs_rsc_set_D1_D2-3) set D5 D6 (id:pcs_rsc_set_D5_D6-1) (id:pcs_rsc_order_D1_D2_set_D5_D6-1)
-    set D6 D1 (id:pcs_rsc_set_D6_D1-1) (id:pcs_rsc_order_D6_D1)
+    set D1 D2 (id:pcs_rsc_set_D1_D2) (id:pcs_rsc_order_set_D1_D2)
+    set D1 D2 (id:pcs_rsc_set_D1_D2-1) (id:pcs_rsc_order_set_D1_D2-1)
+    set D1 D2 (id:pcs_rsc_set_D1_D2-2) set D5 D6 (id:pcs_rsc_set_D5_D6) (id:pcs_rsc_order_set_D1_D2_set_D5_D6)
+    set D1 D2 (id:pcs_rsc_set_D1_D2-3) set D5 D6 (id:pcs_rsc_set_D5_D6-1) (id:pcs_rsc_order_set_D1_D2_set_D5_D6-1)
+    set D6 D1 (id:pcs_rsc_set_D6_D1-1) (id:pcs_rsc_order_set_D6_D1)
 Colocation Constraints:
   Resource Sets:
-    set D1 D2 (id:pcs_rsc_set_D1_D2-4) setoptions score=INFINITY (id:pcs_rsc_colocation_D1_D2)
-    set D1 D2 (id:pcs_rsc_set_D1_D2-5) setoptions score=INFINITY (id:pcs_rsc_colocation_D1_D2-1)
-    set D1 D2 (id:pcs_rsc_set_D1_D2-6) set D5 D6 (id:pcs_rsc_set_D5_D6-2) setoptions score=INFINITY (id:pcs_rsc_colocation_D1_D2_set_D5_D6)
-    set D1 D2 (id:pcs_rsc_set_D1_D2-7) set D5 D6 (id:pcs_rsc_set_D5_D6-3) setoptions score=INFINITY (id:pcs_rsc_colocation_D1_D2_set_D5_D6-1)
-    set D6 D1 (id:pcs_rsc_set_D6_D1) setoptions score=INFINITY (id:pcs_rsc_colocation_D6_D1)
+    set D1 D2 (id:pcs_rsc_set_D1_D2-4) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D1_D2)
+    set D1 D2 (id:pcs_rsc_set_D1_D2-5) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D1_D2-1)
+    set D1 D2 (id:pcs_rsc_set_D1_D2-6) set D5 D6 (id:pcs_rsc_set_D5_D6-2) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D1_D2_set_D5_D6)
+    set D1 D2 (id:pcs_rsc_set_D1_D2-7) set D5 D6 (id:pcs_rsc_set_D5_D6-3) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D1_D2_set_D5_D6-1)
+    set D6 D1 (id:pcs_rsc_set_D6_D1) setoptions score=INFINITY (id:pcs_rsc_colocation_set_D6_D1)
 """)
 
     def testDuplicateLocationRules(self):
         output, returnVal = pcs("constraint location D1 rule #uname eq node1")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint location D1 rule #uname eq node1")
         ac(output, """\
@@ -2161,23 +2262,23 @@ Error: duplicate constraint already exists, use --force to override
     Rule: score=INFINITY  (id:location-D1-rule)
       Expression: #uname eq node1  (id:location-D1-rule-expr)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint location D1 rule #uname eq node1 --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs("constraint location D2 rule #uname eq node1")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location D2 rule #uname eq node1 or #uname eq node2"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             "constraint location D2 rule #uname eq node1 or #uname eq node2"
@@ -2189,7 +2290,7 @@ Error: duplicate constraint already exists, use --force to override
       Expression: #uname eq node1  (id:location-D2-1-rule-expr)
       Expression: #uname eq node2  (id:location-D2-1-rule-expr-1)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint location D2 rule #uname eq node2 or #uname eq node1"
@@ -2201,13 +2302,13 @@ Error: duplicate constraint already exists, use --force to override
       Expression: #uname eq node1  (id:location-D2-1-rule-expr)
       Expression: #uname eq node2  (id:location-D2-1-rule-expr-1)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             "constraint location D2 rule #uname eq node2 or #uname eq node1 --force"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -2234,7 +2335,7 @@ Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
     def testConstraintsCustomId(self):
         output, returnVal = pcs(
@@ -2244,14 +2345,14 @@ Colocation Constraints:
         ac(output, """\
 Error: invalid constraint id '1id', '1' is not a valid first character for a constraint id
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint colocation add D1 with D2 id=id1"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -2260,14 +2361,14 @@ Error: invalid constraint id '1id', '1' is not a valid first character for a con
         ac(output, """\
 Error: id 'id1' is already in use, please specify another one
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint colocation add D2 with D1 100 id=id2"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -2276,14 +2377,14 @@ Error: id 'id1' is already in use, please specify another one
         ac(output, """\
 Error: invalid constraint id '3id', '3' is not a valid first character for a constraint id
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint colocation set D1 D2 setoptions id=id3"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -2292,14 +2393,14 @@ Error: invalid constraint id '3id', '3' is not a valid first character for a con
         ac(output, """\
 Error: id 'id3' is already in use, please specify another one
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint colocation set D2 D1 setoptions score=100 id=id4"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -2308,14 +2409,14 @@ Error: id 'id3' is already in use, please specify another one
         ac(output, """\
 Error: invalid constraint id '5id', '5' is not a valid first character for a constraint id
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint order set D1 D2 setoptions id=id5"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -2324,14 +2425,14 @@ Error: invalid constraint id '5id', '5' is not a valid first character for a con
         ac(output, """\
 Error: id 'id5' is already in use, please specify another one
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint order set D2 D1 setoptions kind=Mandatory id=id6"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -2340,7 +2441,7 @@ Error: id 'id5' is already in use, please specify another one
         ac(output, """\
 Error: invalid constraint id '7id', '7' is not a valid first character for a constraint id
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -2349,7 +2450,7 @@ Error: invalid constraint id '7id', '7' is not a valid first character for a con
         ac(output, """\
 Adding D1 D2 (kind: Mandatory) (Options: id=id7 first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -2358,7 +2459,7 @@ Adding D1 D2 (kind: Mandatory) (Options: id=id7 first-action=start then-action=s
         ac(output, """\
 Error: id 'id7' is already in use, please specify another one
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -2367,7 +2468,7 @@ Error: id 'id7' is already in use, please specify another one
         ac(output, """\
 Adding D2 D1 (kind: Optional) (Options: id=id8 first-action=start then-action=start)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -2376,14 +2477,14 @@ Adding D2 D1 (kind: Optional) (Options: id=id8 first-action=start then-action=st
         ac(output, """\
 Error: invalid constraint id '9id', '9' is not a valid first character for a constraint id
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint location D1 rule constraint-id=id9 defined pingd"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -2392,14 +2493,14 @@ Error: invalid constraint id '9id', '9' is not a valid first character for a con
         ac(output, """\
 Error: id 'id9' is already in use, please specify another one
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint location D2 rule score=100 constraint-id=id10 id=rule1 defined pingd"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -2425,7 +2526,7 @@ Colocation Constraints:
     set D1 D2 (id:pcs_rsc_set_D1_D2) setoptions score=INFINITY (id:id3)
     set D2 D1 (id:pcs_rsc_set_D2_D1) setoptions score=100 (id:id4)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/pcs/test/test_corosync_conf.py b/pcs/test/test_corosync_conf.py
new file mode 100644
index 0000000..541e3f0
--- /dev/null
+++ b/pcs/test/test_corosync_conf.py
@@ -0,0 +1,1182 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os.path
+import sys
+import unittest
+parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+sys.path.insert(0, parentdir)
+
+from pcs_test_functions import pcs, ac
+import corosync_conf
+
+
+class SectionTest(unittest.TestCase):
+
+    def test_empty_section(self):
+        section = corosync_conf.Section("mySection")
+        self.assertEqual(section.parent, None)
+        self.assertEqual(section.get_root(), section)
+        self.assertEqual(section.name, "mySection")
+        self.assertEqual(section.get_attributes(), [])
+        self.assertEqual(section.get_sections(), [])
+        ac(str(section), "")
+
+    def test_attribute_add(self):
+        section = corosync_conf.Section("mySection")
+
+        section.add_attribute("name1", "value1")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+            ]
+        )
+
+        section.add_attribute("name2", "value2")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name2", "value2"],
+            ]
+        )
+
+        section.add_attribute("name2", "value2")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name2", "value2"],
+                ["name2", "value2"],
+            ]
+        )
+
+    def test_attribute_get(self):
+        section = corosync_conf.Section("mySection")
+        section.add_attribute("name1", "value1")
+        section.add_attribute("name2", "value2")
+        section.add_attribute("name3", "value3")
+        section.add_attribute("name2", "value2a")
+
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name2", "value2"],
+                ["name3", "value3"],
+                ["name2", "value2a"],
+            ]
+        )
+        self.assertEqual(
+            section.get_attributes("name1"),
+            [
+                ["name1", "value1"],
+            ]
+        )
+        self.assertEqual(
+            section.get_attributes("name2"),
+            [
+                ["name2", "value2"],
+                ["name2", "value2a"],
+            ]
+        )
+        self.assertEqual(
+            section.get_attributes("nameX"),
+            []
+        )
+
+    def test_attribute_set(self):
+        section = corosync_conf.Section("mySection")
+
+        section.set_attribute("name1", "value1")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+            ]
+        )
+
+        section.set_attribute("name1", "value1")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+            ]
+        )
+
+        section.set_attribute("name1", "value1a")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1a"],
+            ]
+        )
+
+        section.set_attribute("name2", "value2")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1a"],
+                ["name2", "value2"],
+            ]
+        )
+
+        section.set_attribute("name1", "value1")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name2", "value2"],
+            ]
+        )
+
+        section.add_attribute("name3", "value3")
+        section.add_attribute("name2", "value2")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name2", "value2"],
+                ["name3", "value3"],
+                ["name2", "value2"],
+            ]
+        )
+        section.set_attribute("name2", "value2a")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name2", "value2a"],
+                ["name3", "value3"],
+            ]
+        )
+
+        section.add_attribute("name1", "value1")
+        section.add_attribute("name1", "value1")
+        section.set_attribute("name1", "value1")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name2", "value2a"],
+                ["name3", "value3"],
+            ]
+        )
+
+    def test_attribute_change(self):
+        section = corosync_conf.Section("mySection")
+        section.add_attribute("name1", "value1")
+        section.add_attribute("name2", "value2")
+        section.add_attribute("name3", "value3")
+        section.add_attribute("name2", "value2")
+
+        attr = section.get_attributes()[1]
+        attr[0] = "name2a"
+        attr[1] = "value2a"
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name2a", "value2a"],
+                ["name3", "value3"],
+                ["name2", "value2"],
+            ]
+        )
+
+    def test_attribute_del(self):
+        section = corosync_conf.Section("mySection")
+        section.add_attribute("name1", "value1")
+        section.add_attribute("name2", "value2")
+        section.add_attribute("name3", "value3")
+        section.add_attribute("name2", "value2")
+
+        section.del_attribute(section.get_attributes()[1])
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name3", "value3"],
+            ]
+        )
+
+        section.del_attribute(["name3", "value3"])
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+            ]
+        )
+
+        section.del_attribute(["name3", "value3"])
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+            ]
+        )
+
+    def test_attribute_del_by_name(self):
+        section = corosync_conf.Section("mySection")
+        section.add_attribute("name1", "value1")
+        section.add_attribute("name2", "value2")
+        section.add_attribute("name3", "value3")
+        section.add_attribute("name2", "value2")
+
+        section.del_attributes_by_name("nameX")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name2", "value2"],
+                ["name3", "value3"],
+                ["name2", "value2"],
+            ]
+        )
+
+        section.del_attributes_by_name("name2", "value2")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name3", "value3"],
+            ]
+        )
+
+        section.add_attribute("name2", "value2")
+        section.add_attribute("name2", "value2a")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name3", "value3"],
+                ["name2", "value2"],
+                ["name2", "value2a"],
+            ]
+        )
+        section.del_attributes_by_name("name2", "value2")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name3", "value3"],
+                ["name2", "value2a"],
+            ]
+        )
+
+        section.add_attribute("name3", "value3a")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name3", "value3"],
+                ["name2", "value2a"],
+                ["name3", "value3a"],
+            ]
+        )
+        section.del_attributes_by_name("name3")
+        self.assertEqual(
+            section.get_attributes(),
+            [
+                ["name1", "value1"],
+                ["name2", "value2a"],
+            ]
+        )
+
+    def test_section_add(self):
+        root = corosync_conf.Section("root")
+        child1 = corosync_conf.Section("child1")
+        child1a = corosync_conf.Section("child1a")
+        child2 = corosync_conf.Section("child2")
+
+        root.add_section(child1)
+        child1.add_section(child1a)
+        root.add_section(child2)
+        self.assertEqual(root.parent, None)
+        self.assertEqual(child1.parent.name, "root")
+        self.assertEqual(child1a.parent.name, "child1")
+        self.assertEqual(child2.parent.name, "root")
+        ac(str(root), """\
+child1 {
+    child1a {
+    }
+}
+
+child2 {
+}
+""")
+
+        child2.add_section(child1a)
+        self.assertEqual(child1a.parent.name, "child2")
+        ac(str(root), """\
+child1 {
+}
+
+child2 {
+    child1a {
+    }
+}
+""")
+
+        self.assertRaises(
+            corosync_conf.CircularParentshipException,
+            child1a.add_section, child1a
+        )
+        self.assertRaises(
+            corosync_conf.CircularParentshipException,
+            child1a.add_section, child2
+        )
+        self.assertRaises(
+            corosync_conf.CircularParentshipException,
+            child1a.add_section, root
+        )
+
+    def test_section_get(self):
+        root = corosync_conf.Section("")
+        child1 = corosync_conf.Section("child1")
+        child2 = corosync_conf.Section("child2")
+        childa1 = corosync_conf.Section("childA")
+        childa2 = corosync_conf.Section("childA")
+        childa3 = corosync_conf.Section("childA")
+        childa4 = corosync_conf.Section("childA")
+        childb1 = corosync_conf.Section("childB")
+        childb2 = corosync_conf.Section("childB")
+        childa1.add_attribute("id", "1")
+        childa2.add_attribute("id", "2")
+        childa3.add_attribute("id", "3")
+        childa4.add_attribute("id", "4")
+        childb1.add_attribute("id", "5")
+        childb2.add_attribute("id", "6")
+        root.add_section(child1)
+        root.add_section(child2)
+        child1.add_section(childa1)
+        child1.add_section(childa2)
+        child1.add_section(childb1)
+        child2.add_section(childa3)
+        child2.add_section(childb2)
+        child2.add_section(childa4)
+        ac(str(root), """\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+
+child2 {
+    childA {
+        id: 3
+    }
+
+    childB {
+        id: 6
+    }
+
+    childA {
+        id: 4
+    }
+}
+""")
+
+        ac(
+            "---\n".join([str(x) for x in root.get_sections()]),
+            """\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+---
+child2 {
+    childA {
+        id: 3
+    }
+
+    childB {
+        id: 6
+    }
+
+    childA {
+        id: 4
+    }
+}
+""")
+
+        ac(
+            "---\n".join([str(x) for x in root.get_sections("child1")]),
+            """\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+""")
+
+        ac(
+            "---\n".join([str(x) for x in child1.get_sections("childA")]),
+            """\
+childA {
+    id: 1
+}
+---
+childA {
+    id: 2
+}
+""")
+
+        ac(
+            "---\n".join([str(x) for x in child1.get_sections("child2")]),
+            ""
+        )
+
+    def test_section_del(self):
+        root = corosync_conf.Section("")
+        child1 = corosync_conf.Section("child1")
+        child2 = corosync_conf.Section("child2")
+        childa1 = corosync_conf.Section("childA")
+        childa2 = corosync_conf.Section("childA")
+        childa3 = corosync_conf.Section("childA")
+        childa4 = corosync_conf.Section("childA")
+        childb1 = corosync_conf.Section("childB")
+        childb2 = corosync_conf.Section("childB")
+        childa1.add_attribute("id", "1")
+        childa2.add_attribute("id", "2")
+        childa3.add_attribute("id", "3")
+        childa4.add_attribute("id", "4")
+        childb1.add_attribute("id", "5")
+        childb2.add_attribute("id", "6")
+        root.add_section(child1)
+        root.add_section(child2)
+        child1.add_section(childa1)
+        child1.add_section(childa2)
+        child1.add_section(childb1)
+        child2.add_section(childa3)
+        child2.add_section(childb2)
+        child2.add_section(childa4)
+        ac(str(root), """\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+
+child2 {
+    childA {
+        id: 3
+    }
+
+    childB {
+        id: 6
+    }
+
+    childA {
+        id: 4
+    }
+}
+""")
+
+        child2.del_section(childb2)
+        self.assertEqual(childb2.parent, None)
+        ac(str(root), """\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+
+child2 {
+    childA {
+        id: 3
+    }
+
+    childA {
+        id: 4
+    }
+}
+""")
+
+        root.del_section(child2)
+        self.assertEqual(child2.parent, None)
+        ac(str(root), """\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+""")
+
+        self.assertRaises(ValueError, root.del_section, child2)
+
+        self.assertEqual(childa1.parent.name, "child1")
+        self.assertRaises(ValueError, child2.del_section, childa1)
+        self.assertEqual(childa1.parent.name, "child1")
+
+        child1.del_section(childb1)
+        self.assertEqual(childb1.parent, None)
+        ac(str(root), """\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+}
+""")
+
+        child1.del_section(childa1)
+        self.assertEqual(childa1.parent, None)
+        child1.del_section(childa2)
+        self.assertEqual(childa2.parent, None)
+        ac(str(root), """\
+child1 {
+}
+""")
+
+        root.del_section(child1)
+        self.assertEqual(child1.parent, None)
+        ac(str(root), "")
+
+    def test_get_root(self):
+        root = corosync_conf.Section("root")
+        child1 = corosync_conf.Section("child1")
+        child1a = corosync_conf.Section("child1a")
+        root.add_section(child1)
+        child1.add_section(child1a)
+
+        self.assertEqual(root.get_root().name, "root")
+        self.assertEqual(child1.get_root().name, "root")
+        self.assertEqual(child1a.get_root().name, "root")
+
+    def test_str(self):
+        root = corosync_conf.Section("root")
+        ac(str(root), "")
+
+        root.add_attribute("name1", "value1")
+        ac(str(root), "name1: value1\n")
+
+        root.add_attribute("name2", "value2")
+        root.add_attribute("name2", "value2a")
+        root.add_attribute("name3", "value3")
+        ac(str(root), """\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+""")
+
+        child1 = corosync_conf.Section("child1")
+        root.add_section(child1)
+        ac(str(root), """\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+
+child1 {
+}
+""")
+
+        child1.add_attribute("name1.1", "value1.1")
+        child1.add_attribute("name1.2", "value1.2")
+        ac(str(root), """\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+
+child1 {
+    name1.1: value1.1
+    name1.2: value1.2
+}
+""")
+
+        child2 = corosync_conf.Section("child2")
+        child2.add_attribute("name2.1", "value2.1")
+        root.add_section(child2)
+        ac(str(root), """\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+
+child1 {
+    name1.1: value1.1
+    name1.2: value1.2
+}
+
+child2 {
+    name2.1: value2.1
+}
+""")
+
+        child2a = corosync_conf.Section("child2a")
+        child2a.add_attribute("name2.a.1", "value2.a.1")
+        child2.add_section(child2a)
+        ac(str(root), """\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+
+child1 {
+    name1.1: value1.1
+    name1.2: value1.2
+}
+
+child2 {
+    name2.1: value2.1
+
+    child2a {
+        name2.a.1: value2.a.1
+    }
+}
+""")
+
+        child3 = corosync_conf.Section("child3")
+        root.add_section(child3)
+        child3.add_section(corosync_conf.Section("child3a"))
+        child3.add_section(corosync_conf.Section("child3b"))
+        ac(str(root), """\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+
+child1 {
+    name1.1: value1.1
+    name1.2: value1.2
+}
+
+child2 {
+    name2.1: value2.1
+
+    child2a {
+        name2.a.1: value2.a.1
+    }
+}
+
+child3 {
+    child3a {
+    }
+
+    child3b {
+    }
+}
+""")
+
+
+class ParserTest(unittest.TestCase):
+
+    def test_empty(self):
+        ac(str(corosync_conf.parse_string("")), "")
+
+    def test_attributes(self):
+        string = """\
+name:value\
+"""
+        parsed = """\
+name: value
+"""
+        ac(str(corosync_conf.parse_string(string)), parsed)
+
+        string = """\
+name:value
+name:value
+"""
+        parsed = """\
+name: value
+name: value
+"""
+        ac(str(corosync_conf.parse_string(string)), parsed)
+
+        string = """\
+  name1:value1  
+name2  :value2
+name3:  value3
+  name4  :  value4  
+"""
+        parsed = """\
+name1: value1
+name2: value2
+name3: value3
+name4: value4
+"""
+        ac(str(corosync_conf.parse_string(string)), parsed)
+
+        string = """\
+name:foo:value
+"""
+        parsed = """\
+name: foo:value
+"""
+        root = corosync_conf.parse_string(string)
+        self.assertEqual(root.get_attributes(), [["name", "foo:value"]])
+        ac(str(root), parsed)
+
+        string = """\
+name :  
+"""
+        parsed = """\
+name: 
+"""
+        root = corosync_conf.parse_string(string)
+        self.assertEqual(root.get_attributes(), [["name", ""]])
+        ac(str(root), parsed)
+
+    def test_section(self):
+        string = """\
+section1 {
+}\
+"""
+        parsed = """\
+section1 {
+}
+"""
+        ac(str(corosync_conf.parse_string(string)), parsed)
+
+        string = """\
+section1 {
+    section1a   {
+  }
+  section1b        {       
+     }    
+}
+"""
+        parsed = """\
+section1 {
+    section1a {
+    }
+
+    section1b {
+    }
+}
+"""
+        ac(str(corosync_conf.parse_string(string)), parsed)
+
+        string = """\
+section1 {
+    section1a junk1 { junk2
+    junk3 } junk4
+    section1b junk5{junk6
+    junk7}junk8
+}
+section2 {
+   section2a {
+   }
+   section2b {
+   }
+}
+"""
+        parsed = """\
+section1 {
+    section1a junk1 {
+    }
+
+    section1b junk5 {
+    }
+}
+
+section2 {
+    section2a {
+    }
+
+    section2b {
+    }
+}
+"""
+        ac(str(corosync_conf.parse_string(string)), parsed)
+
+        string = """\
+section1 {
+    section1a {
+    }
+
+    section1b {
+    }
+}
+}
+"""
+        self.assertRaises(
+            corosync_conf.ParseErrorException,
+            corosync_conf.parse_string, string
+        )
+
+        string = """\
+section1 {
+    section1a {
+
+    section1b {
+    }
+}
+"""
+        self.assertRaises(
+            corosync_conf.ParseErrorException,
+            corosync_conf.parse_string, string
+        )
+
+        string = """\
+section1 {
+"""
+        self.assertRaises(
+            corosync_conf.ParseErrorException,
+            corosync_conf.parse_string, string
+        )
+
+        string = """\
+}
+"""
+        self.assertRaises(
+            corosync_conf.ParseErrorException,
+            corosync_conf.parse_string, string
+        )
+
+
+    def test_comment(self):
+        string= """\
+# junk1
+name1: value1
+  #junk2
+name2: value2#junk3
+name3: value3 #junk4
+name4 # junk5: value4
+#junk6 name5: value5
+#junk7
+"""
+        parsed = """\
+name1: value1
+name2: value2#junk3
+name3: value3 #junk4
+name4 # junk5: value4
+"""
+        ac(str(corosync_conf.parse_string(string)), parsed)
+
+        string= """\
+# junk1
+section1 { # junk2
+}
+section2 # junk2 {
+}
+section3 {
+} #junk3
+"""
+        parsed = """\
+section1 {
+}
+
+section2 # junk2 {
+}
+
+section3 {
+}
+"""
+        ac(str(corosync_conf.parse_string(string)), parsed)
+
+        string = """\
+section {
+#}
+"""
+        self.assertRaises(
+            corosync_conf.ParseErrorException,
+            corosync_conf.parse_string, string
+        )
+
+        string = """\
+#section {
+}
+"""
+        self.assertRaises(
+            corosync_conf.ParseErrorException,
+            corosync_conf.parse_string, string
+        )
+
+    def test_full(self):
+        string = """\
+# Please read the corosync.conf.5 manual page
+totem {
+	version: 2
+
+	# crypto_cipher and crypto_hash: Used for mutual node authentication.
+	# If you choose to enable this, then do remember to create a shared
+	# secret with "corosync-keygen".
+	# enabling crypto_cipher, requires also enabling of crypto_hash.
+	crypto_cipher: none
+	crypto_hash: none
+
+	# interface: define at least one interface to communicate
+	# over. If you define more than one interface stanza, you must
+	# also set rrp_mode.
+	interface {
+                # Rings must be consecutively numbered, starting at 0.
+		ringnumber: 0
+		# This is normally the *network* address of the
+		# interface to bind to. This ensures that you can use
+		# identical instances of this configuration file
+		# across all your cluster nodes, without having to
+		# modify this option.
+		bindnetaddr: 192.168.1.0
+		# However, if you have multiple physical network
+		# interfaces configured for the same subnet, then the
+		# network address alone is not sufficient to identify
+		# the interface Corosync should bind to. In that case,
+		# configure the *host* address of the interface
+		# instead:
+		# bindnetaddr: 192.168.1.1
+		# When selecting a multicast address, consider RFC
+		# 2365 (which, among other things, specifies that
+		# 239.255.x.x addresses are left to the discretion of
+		# the network administrator). Do not reuse multicast
+		# addresses across multiple Corosync clusters sharing
+		# the same network.
+		mcastaddr: 239.255.1.1
+		# Corosync uses the port you specify here for UDP
+		# messaging, and also the immediately preceding
+		# port. Thus if you set this to 5405, Corosync sends
+		# messages over UDP ports 5405 and 5404.
+		mcastport: 5405
+		# Time-to-live for cluster communication packets. The
+		# number of hops (routers) that this ring will allow
+		# itself to pass. Note that multicast routing must be
+		# specifically enabled on most network routers.
+		ttl: 1
+	}
+}
+
+logging {
+	# Log the source file and line where messages are being
+	# generated. When in doubt, leave off. Potentially useful for
+	# debugging.
+	fileline: off
+	# Log to standard error. When in doubt, set to no. Useful when
+	# running in the foreground (when invoking "corosync -f")
+	to_stderr: no
+	# Log to a log file. When set to "no", the "logfile" option
+	# must not be set.
+	to_logfile: yes
+	logfile: /var/log/cluster/corosync.log
+	# Log to the system log daemon. When in doubt, set to yes.
+	to_syslog: yes
+	# Log debug messages (very verbose). When in doubt, leave off.
+	debug: off
+	# Log messages with time stamps. When in doubt, set to on
+	# (unless you are only logging to syslog, where double
+	# timestamps can be annoying).
+	timestamp: on
+	logger_subsys {
+		subsys: QUORUM
+		debug: off
+	}
+}
+
+quorum {
+	# Enable and configure quorum subsystem (default: off)
+	# see also corosync.conf.5 and votequorum.5
+	#provider: corosync_votequorum
+}
+"""
+        parsed = """\
+totem {
+    version: 2
+    crypto_cipher: none
+    crypto_hash: none
+
+    interface {
+        ringnumber: 0
+        bindnetaddr: 192.168.1.0
+        mcastaddr: 239.255.1.1
+        mcastport: 5405
+        ttl: 1
+    }
+}
+
+logging {
+    fileline: off
+    to_stderr: no
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+    debug: off
+    timestamp: on
+
+    logger_subsys {
+        subsys: QUORUM
+        debug: off
+    }
+}
+
+quorum {
+}
+"""
+        ac(str(corosync_conf.parse_string(string)), parsed)
+
+        string = """\
+# Please read the corosync.conf.5 manual page
+totem {
+	version: 2
+
+	crypto_cipher: none
+	crypto_hash: none
+
+	interface {
+		ringnumber: 0
+		bindnetaddr: 10.16.35.0
+		mcastport: 5405
+		ttl: 1
+	}
+	transport: udpu
+}
+
+logging {
+	fileline: off
+	to_logfile: yes
+	to_syslog: yes
+	logfile: /var/log/cluster/corosync.log
+	debug: off
+	timestamp: on
+	logger_subsys {
+		subsys: QUORUM
+		debug: off
+	}
+}
+
+nodelist {
+	node {
+		ring0_addr: 10.16.35.101
+		nodeid: 1
+	}
+
+	node {
+		ring0_addr: 10.16.35.102
+		nodeid: 2
+	}
+
+	node {
+		ring0_addr: 10.16.35.103
+	}
+
+	node {
+		ring0_addr: 10.16.35.104
+	}
+
+	node {
+		ring0_addr: 10.16.35.105
+	}
+}
+
+quorum {
+	# Enable and configure quorum subsystem (default: off)
+	# see also corosync.conf.5 and votequorum.5
+	#provider: corosync_votequorum
+}
+"""
+        parsed = """\
+totem {
+    version: 2
+    crypto_cipher: none
+    crypto_hash: none
+    transport: udpu
+
+    interface {
+        ringnumber: 0
+        bindnetaddr: 10.16.35.0
+        mcastport: 5405
+        ttl: 1
+    }
+}
+
+logging {
+    fileline: off
+    to_logfile: yes
+    to_syslog: yes
+    logfile: /var/log/cluster/corosync.log
+    debug: off
+    timestamp: on
+
+    logger_subsys {
+        subsys: QUORUM
+        debug: off
+    }
+}
+
+nodelist {
+    node {
+        ring0_addr: 10.16.35.101
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: 10.16.35.102
+        nodeid: 2
+    }
+
+    node {
+        ring0_addr: 10.16.35.103
+    }
+
+    node {
+        ring0_addr: 10.16.35.104
+    }
+
+    node {
+        ring0_addr: 10.16.35.105
+    }
+}
+
+quorum {
+}
+"""
+        ac(str(corosync_conf.parse_string(string)), parsed)
+
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/pcs/test/test_node.py b/pcs/test/test_node.py
new file mode 100644
index 0000000..7957016
--- /dev/null
+++ b/pcs/test/test_node.py
@@ -0,0 +1,221 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os,sys
+import shutil
+import unittest
+currentdir = os.path.dirname(os.path.abspath(__file__))
+parentdir = os.path.dirname(currentdir)
+sys.path.insert(0, parentdir)
+import utils
+from pcs_test_functions import pcs,ac
+
+empty_cib = os.path.join(currentdir, "empty-withnodes.xml")
+temp_cib = os.path.join(currentdir, "temp.xml")
+
+class ClusterTest(unittest.TestCase):
+    def setUp(self):
+        shutil.copy(empty_cib, temp_cib)
+
+    def test_node_maintenance(self):
+        output, _ = pcs(temp_cib, "property")
+        expected_out = """\
+Cluster Properties:
+"""
+        ac(expected_out, output)
+        output, returnVal = pcs(temp_cib, "node maintenance rh7-1")
+        ac("", output)
+        self.assertEqual(returnVal, 0)
+        output, _ = pcs(temp_cib, "property")
+        expected_out = """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: maintenance=on
+"""
+        ac(expected_out, output)
+
+        output, returnVal = pcs(temp_cib, "node maintenance rh7-1")
+        ac("", output)
+        self.assertEqual(returnVal, 0)
+        output, _ = pcs(temp_cib, "property")
+        expected_out = """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: maintenance=on
+"""
+        ac(expected_out, output)
+
+        output, returnVal = pcs(temp_cib, "node maintenance --all")
+        ac("", output)
+        self.assertEqual(returnVal, 0)
+        output, _ = pcs(temp_cib, "property")
+        expected_out = """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: maintenance=on
+ rh7-2: maintenance=on
+"""
+        ac(expected_out, output)
+
+        output, returnVal = pcs(temp_cib, "node unmaintenance rh7-2 rh7-1")
+        ac("", output)
+        self.assertEqual(returnVal, 0)
+        output, _ = pcs(temp_cib, "property")
+        expected_out = """\
+Cluster Properties:
+"""
+        ac(expected_out, output)
+
+        output, returnVal = pcs(temp_cib, "node maintenance rh7-1 rh7-2")
+        ac("", output)
+        self.assertEqual(returnVal, 0)
+        output, _ = pcs(temp_cib, "property")
+        expected_out = """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: maintenance=on
+ rh7-2: maintenance=on
+"""
+        ac(expected_out, output)
+
+        output, returnVal = pcs(temp_cib, "node maintenance nonexistant-node")
+        self.assertEqual(returnVal, 1)
+        self.assertEqual(
+            output,
+            "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+        )
+        output, _ = pcs(temp_cib, "property")
+        expected_out = """\
+Cluster Properties:
+Node Attributes:
+ rh7-1: maintenance=on
+ rh7-2: maintenance=on
+"""
+        ac(expected_out, output)
+
+        output, returnVal = pcs(temp_cib, "node unmaintenance rh7-1")
+        ac("", output)
+        self.assertEqual(returnVal, 0)
+        output, _ = pcs(temp_cib, "property")
+        expected_out = """\
+Cluster Properties:
+Node Attributes:
+ rh7-2: maintenance=on
+"""
+        ac(expected_out, output)
+
+        output, returnVal = pcs(temp_cib, "node unmaintenance rh7-1")
+        ac("", output)
+        self.assertEqual(returnVal, 0)
+        output, _ = pcs(temp_cib, "property")
+        expected_out = """\
+Cluster Properties:
+Node Attributes:
+ rh7-2: maintenance=on
+"""
+        ac(expected_out, output)
+
+        output, returnVal = pcs(temp_cib, "node unmaintenance --all")
+        ac("", output)
+        self.assertEqual(returnVal, 0)
+        output, _ = pcs(temp_cib, "property")
+        expected_out = """\
+Cluster Properties:
+"""
+        ac(expected_out, output)
+
+        output, returnVal = pcs(temp_cib, "node unmaintenance nonexistant-node")
+        self.assertEqual(returnVal, 1)
+        self.assertEqual(
+            output,
+            "Error: Node 'nonexistant-node' does not appear to exist in configuration\n"
+        )
+        output, _ = pcs(temp_cib, "property")
+        expected_out = """\
+Cluster Properties:
+"""
+        ac(expected_out, output)
+
+    def test_node_utilization_set(self):
+        output, returnVal = pcs(temp_cib, "node utilization rh7-1 test1=10")
+        ac("", output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "node utilization rh7-2")
+        expected_out = """\
+Node Utilization:
+ rh7-2: \n"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "node utilization rh7-1")
+        expected_out = """\
+Node Utilization:
+ rh7-1: test1=10
+"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "node utilization rh7-1 test1=-10 test4=1234"
+        )
+        ac("", output)
+        self.assertEqual(0, returnVal)
+        output, returnVal = pcs(temp_cib, "node utilization rh7-1")
+        expected_out = """\
+Node Utilization:
+ rh7-1: test1=-10 test4=1234
+"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "node utilization rh7-2 test2=321 empty="
+        )
+        ac("", output)
+        self.assertEqual(0, returnVal)
+        output, returnVal = pcs(temp_cib, "node utilization rh7-2")
+        expected_out = """\
+Node Utilization:
+ rh7-2: test2=321
+"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "node utilization")
+        expected_out = """\
+Node Utilization:
+ rh7-1: test1=-10 test4=1234
+ rh7-2: test2=321
+"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
+    def test_node_utilization_set_invalid(self):
+        output, returnVal = pcs(temp_cib, "node utilization rh7-0")
+        expected_out = """\
+Error: Unable to find a node: rh7-0
+"""
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(temp_cib, "node utilization rh7-0 test=10")
+        expected_out = """\
+Error: Unable to find a node: rh7-0
+"""
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "node utilization rh7-1 test1=10 test=int"
+        )
+        expected_out = """\
+Error: Value of utilization attribute must be integer: 'test=int'
+"""
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
+
+if __name__ == "__main__":
+    unittest.main()
diff --git a/pcs/test/test_properties.py b/pcs/test/test_properties.py
index ec0876a..aeab52b 100644
--- a/pcs/test/test_properties.py
+++ b/pcs/test/test_properties.py
@@ -1,3 +1,8 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import os,sys
 import shutil
 import unittest
@@ -105,7 +110,7 @@ class PropertyTest(unittest.TestCase):
 
         o,r = pcs("property unset --node=rh7-1 IP")
         ac(o,"Error: attribute: 'IP' doesn't exist for node: 'rh7-1'\n")
-        assert r==1
+        assert r==2
 
         o,r = pcs("property unset --node=rh7-1 IP --force")
         ac(o,"")
@@ -116,8 +121,8 @@ class PropertyTest(unittest.TestCase):
         assert r==1
         ac(o,"Error: unknown cluster property: 'xxxx', (use --force to override)\n")
 
-        output, returnVal = pcs("property set 1234=5678 --force")
-        ac(output, "Error: invalid property name '1234', '1' is not a valid first character for a property name\n")
+        output, returnVal = pcs("property set =5678 --force")
+        ac(output, "Error: property name cannot be empty\n")
         assert returnVal == 1
 
         o,r = pcs("property unset zzzzz")
diff --git a/pcs/test/test_resource.py b/pcs/test/test_resource.py
index 97711dc..730450d 100644
--- a/pcs/test/test_resource.py
+++ b/pcs/test/test_resource.py
@@ -1,12 +1,20 @@
-import os,sys
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import sys
 import shutil
 import re
-import datetime
 import unittest
 parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0,parentdir) 
+sys.path.insert(0, parentdir)
+
 import utils
-from pcs_test_functions import pcs,ac
+from pcs_test_functions import pcs, ac
+import resource
+
 
 empty_cib = "empty.xml"
 temp_cib = "temp.xml"
@@ -189,11 +197,44 @@ the health of a system via IPMI.
 # Verify all resource have been added
         output, returnVal = pcs(temp_cib, "resource show")
         assert returnVal == 0
-        assert output == ' ClusterIP\t(ocf::heartbeat:IPaddr2):\tStopped \n ClusterIP2\t(ocf::heartbeat:IPaddr2):\tStopped \n ClusterIP3\t(ocf::heartbeat:IPaddr2):\tStopped \n ClusterIP4\t(ocf::heartbeat:IPaddr2):\tStopped \n ClusterIP5\t(ocf::heartbeat:IPaddr2):\tStopped \n ClusterIP6\t(ocf::heartbeat:IPaddr2):\tStopped \n ClusterIP7\t(ocf::heartbeat:IPaddr2):\tStopped \n'
+        ac(output, """\
+ ClusterIP\t(ocf::heartbeat:IPaddr2):\tStopped
+ ClusterIP2\t(ocf::heartbeat:IPaddr2):\tStopped
+ ClusterIP3\t(ocf::heartbeat:IPaddr2):\tStopped
+ ClusterIP4\t(ocf::heartbeat:IPaddr2):\tStopped
+ ClusterIP5\t(ocf::heartbeat:IPaddr2):\tStopped
+ ClusterIP6\t(ocf::heartbeat:IPaddr2):\tStopped
+ ClusterIP7\t(ocf::heartbeat:IPaddr2):\t(target-role:Stopped) Stopped
+""")
 
         output, returnVal = pcs(temp_cib, "resource show ClusterIP6 --full")
         assert returnVal == 0
-        assert output == ' Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=192.168.0.99 cidr_netmask=32 \n  Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)\n Resource: ClusterIP2 (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=192.168.0.99 cidr_netmask=32 \n  Operations: monitor interval=30s (ClusterIP2-monitor-interval-30s)\n Resource: ClusterIP3 (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=192.168.0.99 c [...]
+        ac(output, """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
+ Resource: ClusterIP2 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=30s (ClusterIP2-monitor-interval-30s)
+ Resource: ClusterIP3 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=30s (ClusterIP3-monitor-interval-30s)
+ Resource: ClusterIP4 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=30s (ClusterIP4-monitor-interval-30s)
+ Resource: ClusterIP5 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=30s (ClusterIP5-monitor-interval-30s)
+ Resource: ClusterIP6 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=31s (ClusterIP6-monitor-interval-31s)
+              start interval=32s (ClusterIP6-start-interval-32s)
+              stop interval=33s (ClusterIP6-stop-interval-33s)
+ Resource: ClusterIP7 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Meta Attrs: target-role=Stopped 
+  Operations: monitor interval=30s (ClusterIP7-monitor-interval-30s)
+""")
 
         output, returnVal = pcs(
             temp_cib,
@@ -202,7 +243,7 @@ the health of a system via IPMI.
         ac(output, """\
 Error: When using 'op' you must specify an operation name and at least one option
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -211,7 +252,7 @@ Error: When using 'op' you must specify an operation name and at least one optio
         ac(output, """\
 Error: When using 'op' you must specify an operation name after 'op'
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -220,7 +261,7 @@ Error: When using 'op' you must specify an operation name after 'op'
         ac(output, """\
 Error: When using 'op' you must specify an operation name and at least one option
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -229,7 +270,7 @@ Error: When using 'op' you must specify an operation name and at least one optio
         ac(output, """\
 Error: When using 'op' you must specify an operation name and at least one option
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -238,7 +279,7 @@ Error: When using 'op' you must specify an operation name and at least one optio
         ac(output, """\
 Error: When using 'op' you must specify an operation name and at least one option
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -248,14 +289,14 @@ Error: When using 'op' you must specify an operation name and at least one optio
 Error: operation monitor with interval 10s already specified for A:
 monitor interval=10 timeout=10 (A-monitor-interval-10)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "resource create A dummy op monitor interval=10 timeout=10 op stop interval=10 timeout=20"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show A")
         ac(output, """\
@@ -264,7 +305,7 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
               monitor interval=10 timeout=10 (A-monitor-interval-10)
               stop interval=10 timeout=20 (A-stop-interval-10)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
     def testAddBadResources(self):
         line = "resource create --no-default-ops bad_resource idontexist test=bad"
@@ -280,7 +321,11 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
         line = "resource show --full"
         output, returnVal = pcs(temp_cib, line) 
         assert returnVal == 0
-        assert output == " Resource: bad_resource2 (class=ocf provider=heartbeat type=idontexist2)\n  Attributes: test4=bad3 \n  Operations: monitor interval=60s (bad_resource2-monitor-interval-60s)\n",[output]
+        ac(output, """\
+ Resource: bad_resource2 (class=ocf provider=heartbeat type=idontexist2)
+  Attributes: test4=bad3
+  Operations: monitor interval=60s (bad_resource2-monitor-interval-60s)
+""")
 
         output, returnVal = pcs(temp_cib, "resource create dum:my Dummy")
         assert returnVal == 1
@@ -311,6 +356,10 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
         assert returnVal == 0
         assert output == 'NO resources configured\n'
 
+        output, returnVal = pcs(temp_cib, "resource delete ClusterIP")
+        assert returnVal == 1
+        ac(output, "Error: Resource 'ClusterIP' does not exist.\n")
+
     def testResourceShow(self):
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
         output, returnVal = pcs(temp_cib, line) 
@@ -319,7 +368,11 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
 
         output, returnVal = pcs(temp_cib, "resource show ClusterIP")
         assert returnVal == 0
-        assert output == ' Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=192.168.0.99 cidr_netmask=32 \n  Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)\n', [output]
+        ac(output, """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
+""")
 
     def testResourceUpdate(self):
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
@@ -362,6 +415,15 @@ monitor interval=10 timeout=10 (A-monitor-interval-10)
 
         line = 'resource op add ClusterIP monitor interval=31s'
         output, returnVal = pcs(temp_cib, line) 
+        ac(output, """\
+Error: operation monitor already specified for ClusterIP, use --force to override:
+monitor interval=30s (ClusterIP-monitor-interval-30s)
+""")
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource op add ClusterIP monitor interval=31s --force"
+        )
         assert returnVal == 0
         assert output == ""
 
@@ -392,33 +454,47 @@ Error: moni=tor does not appear to be a valid operation action
 
         output, returnVal = pcs(temp_cib, "resource show ClusterIP")
         assert returnVal == 0
-        ac (output,' Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=192.168.0.99 cidr_netmask=32 \n  Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)\n              monitor interval=31s (ClusterIP-monitor-interval-31s)\n')
+        ac(output, """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
+              monitor interval=31s (ClusterIP-monitor-interval-31s)
+""")
 
         o, r = pcs(temp_cib, "resource create --no-default-ops OPTest Dummy op monitor interval=30s OCF_CHECK_LEVEL=1 op monitor interval=25s OCF_CHECK_LEVEL=1")
         ac(o,"")
         assert r == 0
-        
+
         o, r = pcs(temp_cib, "resource show OPTest")
         ac(o," Resource: OPTest (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=30s OCF_CHECK_LEVEL=1 (OPTest-monitor-interval-30s)\n              monitor interval=25s OCF_CHECK_LEVEL=1 (OPTest-monitor-interval-25s)\n")
         assert r == 0
 
-        o, r = pcs(temp_cib, "resource create --no-default-ops OPTest2 Dummy op monitor interval=30s OCF_CHECK_LEVEL=1 op monitor interval=25s OCF_CHECK_LEVEL=1 op start timeout=30s")
+        o, r = pcs(temp_cib, "resource create --no-default-ops OPTest2 Dummy op monitor interval=30s OCF_CHECK_LEVEL=1 op monitor interval=25s OCF_CHECK_LEVEL=2 op start timeout=30s")
         ac(o,"")
         assert r == 0
-        
+
         o, r = pcs(temp_cib, "resource op add OPTest2 start timeout=1800s")
         ac(o, """\
 Error: operation start with interval 0s already specified for OPTest2:
 start interval=0s timeout=30s (OPTest2-start-interval-0s)
 """)
         assert r == 1
-        
+
+        output, retVal = pcs(
+            temp_cib, "resource op add OPTest2 start interval=100"
+        )
+        ac(output, """\
+Error: operation start already specified for OPTest2, use --force to override:
+start interval=0s timeout=30s (OPTest2-start-interval-0s)
+""")
+        self.assertEqual(1, retVal)
+
         o, r = pcs(temp_cib, "resource op add OPTest2 monitor timeout=1800s")
         ac(o,"")
         assert r == 0
-        
+
         o, r = pcs(temp_cib, "resource show OPTest2")
-        ac(o," Resource: OPTest2 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=30s OCF_CHECK_LEVEL=1 (OPTest2-monitor-interval-30s)\n              monitor interval=25s OCF_CHECK_LEVEL=1 (OPTest2-monitor-interval-25s)\n              start interval=0s timeout=30s (OPTest2-start-interval-0s)\n              monitor interval=60s timeout=1800s (OPTest2-monitor-interval-60s)\n")
+        ac(o," Resource: OPTest2 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=30s OCF_CHECK_LEVEL=1 (OPTest2-monitor-interval-30s)\n              monitor interval=25s OCF_CHECK_LEVEL=2 (OPTest2-monitor-interval-25s)\n              start interval=0s timeout=30s (OPTest2-start-interval-0s)\n              monitor interval=60s timeout=1800s (OPTest2-monitor-interval-60s)\n")
         assert r == 0
 
         o,r = pcs(temp_cib, "resource create --no-default-ops OPTest3 Dummy op monitor OCF_CHECK_LEVEL=1")
@@ -474,6 +550,13 @@ start interval=0s timeout=30s (OPTest2-start-interval-0s)
         assert r == 0
 
         o,r = pcs(temp_cib, "resource op add OPTest7 monitor interval=61s OCF_CHECK_LEVEL=1")
+        ac(o, """\
+Error: operation monitor already specified for OPTest7, use --force to override:
+monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
+""")
+        self.assertEqual(1, r)
+
+        o,r = pcs(temp_cib, "resource op add OPTest7 monitor interval=61s OCF_CHECK_LEVEL=1 --force")
         ac(o,"")
         assert r == 0
 
@@ -488,58 +571,104 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
 """)
         assert r == 1
 
-        o,r = pcs("resource create --no-default-ops OCFTest1 Dummy")
+        o,r = pcs(temp_cib, "resource create --no-default-ops OCFTest1 Dummy")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource op add OCFTest1 monitor interval=31s")
+        o,r = pcs(temp_cib, "resource op add OCFTest1 monitor interval=31s")
+        ac(o, """\
+Error: operation monitor already specified for OCFTest1, use --force to override:
+monitor interval=60s (OCFTest1-monitor-interval-60s)
+""")
+        self.assertEqual(1, r)
+
+        o,r = pcs(temp_cib, "resource op add OCFTest1 monitor interval=31s --force")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource op add OCFTest1 monitor interval=30s OCF_CHECK_LEVEL=15")
+        o,r = pcs(temp_cib, "resource op add OCFTest1 monitor interval=30s OCF_CHECK_LEVEL=15")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource show OCFTest1")
+        o,r = pcs(temp_cib, "resource show OCFTest1")
         ac(o," Resource: OCFTest1 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (OCFTest1-monitor-interval-60s)\n              monitor interval=31s (OCFTest1-monitor-interval-31s)\n              monitor interval=30s OCF_CHECK_LEVEL=15 (OCFTest1-monitor-interval-30s)\n")
         assert r == 0
 
-        o,r = pcs("resource update OCFTest1 op monitor interval=61s OCF_CHECK_LEVEL=5")
+        o,r = pcs(temp_cib, "resource update OCFTest1 op monitor interval=61s OCF_CHECK_LEVEL=5")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource show OCFTest1")
+        o,r = pcs(temp_cib, "resource show OCFTest1")
         ac(o," Resource: OCFTest1 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=61s OCF_CHECK_LEVEL=5 (OCFTest1-monitor-interval-61s)\n              monitor interval=31s (OCFTest1-monitor-interval-31s)\n              monitor interval=30s OCF_CHECK_LEVEL=15 (OCFTest1-monitor-interval-30s)\n")
         assert r == 0
 
-        o,r = pcs("resource update OCFTest1 op monitor OCF_CHECK_LEVEL=4")
+        o,r = pcs(temp_cib, "resource update OCFTest1 op monitor OCF_CHECK_LEVEL=4")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource show OCFTest1")
+        o,r = pcs(temp_cib, "resource show OCFTest1")
         ac(o," Resource: OCFTest1 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s OCF_CHECK_LEVEL=4 (OCFTest1-monitor-interval-60s)\n              monitor interval=31s (OCFTest1-monitor-interval-31s)\n              monitor interval=30s OCF_CHECK_LEVEL=15 (OCFTest1-monitor-interval-30s)\n")
         assert r == 0
 
-        o,r = pcs("resource update OCFTest1 op monitor OCF_CHECK_LEVEL=4 interval=35s")
+        o,r = pcs(temp_cib, "resource update OCFTest1 op monitor OCF_CHECK_LEVEL=4 interval=35s")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("resource show OCFTest1")
+        o,r = pcs(temp_cib, "resource show OCFTest1")
         ac(o," Resource: OCFTest1 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=35s OCF_CHECK_LEVEL=4 (OCFTest1-monitor-interval-35s)\n              monitor interval=31s (OCFTest1-monitor-interval-31s)\n              monitor interval=30s OCF_CHECK_LEVEL=15 (OCFTest1-monitor-interval-30s)\n")
         assert r == 0
 
+        output, retVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops state ocf:pacemaker:Stateful"
+        )
+        ac(output, "")
+        self.assertEqual(0, retVal)
+
+        output, retVal = pcs(
+            temp_cib, "resource op add state monitor interval=10"
+        )
+        ac(output, """\
+Error: operation monitor already specified for state, use --force to override:
+monitor interval=60s (state-monitor-interval-60s)
+""")
+        self.assertEqual(1, retVal)
+
+        output, retVal = pcs(
+            temp_cib, "resource op add state monitor interval=10 role=Started"
+        )
+        ac(output, """\
+Error: operation monitor already specified for state, use --force to override:
+monitor interval=60s (state-monitor-interval-60s)
+""")
+        self.assertEqual(1, retVal)
+
+        output, retVal = pcs(
+            temp_cib, "resource op add state monitor interval=10 role=Master"
+        )
+        ac(output, "")
+        self.assertEqual(0, retVal)
+
+        output, retVal = pcs(temp_cib, "resource show state")
+        ac(output, """\
+ Resource: state (class=ocf provider=pacemaker type=Stateful)
+  Operations: monitor interval=60s (state-monitor-interval-60s)
+              monitor interval=10 role=Master (state-monitor-interval-10)
+""")
+        self.assertEqual(0, retVal)
+
     def testRemoveOperation(self):
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
         output, returnVal = pcs(temp_cib, line) 
         assert returnVal == 0
         assert output == ""
 
-        line = 'resource op add ClusterIP monitor interval=31s'
+        line = 'resource op add ClusterIP monitor interval=31s --force'
         output, returnVal = pcs(temp_cib, line) 
         assert returnVal == 0
         assert output == ""
 
-        line = 'resource op add ClusterIP monitor interval=32s'
+        line = 'resource op add ClusterIP monitor interval=32s --force'
         output, returnVal = pcs(temp_cib, line) 
         assert returnVal == 0
         assert output == ""
@@ -565,8 +694,12 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
         assert output == 'Error: Unable to find operation matching: monitor interval=30s\n'
 
         output, returnVal = pcs(temp_cib, "resource show ClusterIP")
+        ac(output, """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=31s (ClusterIP-monitor-interval-31s)
+""")
         assert returnVal == 0
-        ac(output,' Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=192.168.0.99 cidr_netmask=32 \n  Operations: monitor interval=31s (ClusterIP-monitor-interval-31s)\n')
 
         line = 'resource op remove ClusterIP monitor interval=31s'
         output, returnVal = pcs(temp_cib, line) 
@@ -574,15 +707,18 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
         assert output == ""
 
         output, returnVal = pcs(temp_cib, "resource show ClusterIP")
+        ac(output, """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+""")
         assert returnVal == 0
-        assert output == ' Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=192.168.0.99 cidr_netmask=32 \n'
 
         line = 'resource op add ClusterIP monitor interval=31s'
         output, returnVal = pcs(temp_cib, line) 
         assert returnVal == 0
         assert output == ""
 
-        line = 'resource op add ClusterIP monitor interval=32s'
+        line = 'resource op add ClusterIP monitor interval=32s --force'
         output, returnVal = pcs(temp_cib, line) 
         assert returnVal == 0
         assert output == ""
@@ -604,10 +740,15 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
         assert output == ""
 
         output, returnVal = pcs(temp_cib, "resource show ClusterIP")
+        ac(output, """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: stop interval=0s timeout=34s (ClusterIP-stop-interval-0s)
+              start interval=0s timeout=33s (ClusterIP-start-interval-0s)
+""")
         assert returnVal == 0
-        ac (output,' Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=192.168.0.99 cidr_netmask=32 \n  Operations: stop interval=0s timeout=34s (ClusterIP-stop-interval-0s)\n              start interval=0s timeout=33s (ClusterIP-start-interval-0s)\n')
 
-    def testUpdateOpration(self):
+    def testUpdateOperation(self):
         line = "resource create --no-default-ops ClusterIP ocf:heartbeat:IPaddr2 ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s"
         output, returnVal = pcs(temp_cib, line) 
         assert output == ""
@@ -640,15 +781,20 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
 
         line = 'resource show ClusterIP --full'
         output, returnVal = pcs(temp_cib, line) 
+        ac(output, """\
+ Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=33s (ClusterIP-monitor-interval-33s)
+              start interval=30s timeout=180s (ClusterIP-start-interval-30s)
+""")
         assert returnVal == 0
-        ac(output,' Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=192.168.0.99 cidr_netmask=32 \n  Operations: monitor interval=33s (ClusterIP-monitor-interval-33s)\n              start interval=30s timeout=180s (ClusterIP-start-interval-30s)\n')
 
         output, returnVal = pcs(
             temp_cib,
             "resource create A dummy op monitor interval=10 op monitor interval=20"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show A")
         ac(output, """\
@@ -658,7 +804,7 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
               monitor interval=10 (A-monitor-interval-10)
               monitor interval=20 (A-monitor-interval-20)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -668,14 +814,14 @@ monitor interval=60s OCF_CHECK_LEVEL=1 (OPTest7-monitor-interval-60s)
 Error: operation monitor with interval 20s already specified for A:
 monitor interval=20 (A-monitor-interval-20)
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "resource update A op monitor interval=11"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show A")
         ac(output, """\
@@ -685,62 +831,62 @@ monitor interval=20 (A-monitor-interval-20)
               monitor interval=11 (A-monitor-interval-11)
               monitor interval=20 (A-monitor-interval-20)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "resource create B dummy --no-default-ops"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "resource op remove B-monitor-interval-60s"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show B")
         ac(output, """\
  Resource: B (class=ocf provider=heartbeat type=Dummy)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "resource update B op monitor interval=60s"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show B")
         ac(output, """\
  Resource: B (class=ocf provider=heartbeat type=Dummy)
   Operations: monitor interval=60s (B-monitor-interval-60s)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "resource update B op monitor interval=30"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show B")
         ac(output, """\
  Resource: B (class=ocf provider=heartbeat type=Dummy)
   Operations: monitor interval=30 (B-monitor-interval-30)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "resource update B op start interval=0 timeout=10"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show B")
         ac(output, """\
@@ -748,14 +894,14 @@ monitor interval=20 (A-monitor-interval-20)
   Operations: monitor interval=30 (B-monitor-interval-30)
               start interval=0 timeout=10 (B-start-interval-0)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "resource update B op start interval=0 timeout=20"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show B")
         ac(output, """\
@@ -763,14 +909,14 @@ monitor interval=20 (A-monitor-interval-20)
   Operations: monitor interval=30 (B-monitor-interval-30)
               start interval=0 timeout=20 (B-start-interval-0)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "resource update B op monitor interval=33"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show B")
         ac(output, """\
@@ -778,14 +924,14 @@ monitor interval=20 (A-monitor-interval-20)
   Operations: monitor interval=33 (B-monitor-interval-33)
               start interval=0 timeout=20 (B-start-interval-0)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "resource update B op monitor interval=100 role=Master"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show B")
         ac(output, """\
@@ -794,14 +940,14 @@ monitor interval=20 (A-monitor-interval-20)
               start interval=0 timeout=20 (B-start-interval-0)
               monitor interval=100 role=Master (B-monitor-interval-100)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "resource update B op start interval=0 timeout=22"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource show B")
         ac(output, """\
@@ -810,7 +956,7 @@ monitor interval=20 (A-monitor-interval-20)
               start interval=0 timeout=22 (B-start-interval-0)
               monitor interval=100 role=Master (B-monitor-interval-100)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
     def testGroupDeleteTest(self):
         o,r = pcs(temp_cib, "resource create --no-default-ops A1 Dummy --group AGroup")
@@ -822,7 +968,12 @@ monitor interval=20 (A-monitor-interval-20)
 
         o,r = pcs(temp_cib, "resource show")
         assert r == 0
-        ac(o," Resource Group: AGroup\n     A1\t(ocf::heartbeat:Dummy):\tStopped \n     A2\t(ocf::heartbeat:Dummy):\tStopped \n     A3\t(ocf::heartbeat:Dummy):\tStopped \n")
+        ac(o,"""\
+ Resource Group: AGroup
+     A1\t(ocf::heartbeat:Dummy):\tStopped
+     A2\t(ocf::heartbeat:Dummy):\tStopped
+     A3\t(ocf::heartbeat:Dummy):\tStopped
+""")
 
         o,r = pcs(temp_cib, "resource delete AGroup")
         ac(o,"Removing group: AGroup (and all resources within group)\nStopping all resources in group: AGroup...\nDeleting Resource - A1\nDeleting Resource - A2\nDeleting Resource (and group) - A3\n")
@@ -875,12 +1026,42 @@ monitor interval=20 (A-monitor-interval-20)
 
         o,r = pcs(temp_cib, "resource show")
         assert r == 0
-        ac(o,' ClusterIP6\t(ocf::heartbeat:IPaddr2):\tStopped \n Resource Group: TestGroup1\n     ClusterIP\t(ocf::heartbeat:IPaddr2):\tStopped \n Clone Set: ClusterIP4-clone [ClusterIP4]\n Master/Slave Set: Master [ClusterIP5]\n Resource Group: AGroup\n     A2\t(ocf::heartbeat:Dummy):\tStopped \n     A4\t(ocf::heartbeat:Dummy):\tStopped \n     A5\t(ocf::heartbeat:Dummy):\tStopped \n A1\t(ocf::heartbeat:Dummy):\tStopped \n A3\t(ocf::heartbeat:Dummy):\tStopped \n')
+        ac(o, """\
+ ClusterIP6\t(ocf::heartbeat:IPaddr2):\tStopped
+ Resource Group: TestGroup1
+     ClusterIP\t(ocf::heartbeat:IPaddr2):\tStopped
+ Clone Set: ClusterIP4-clone [ClusterIP4]
+ Master/Slave Set: Master [ClusterIP5]
+ Resource Group: AGroup
+     A2\t(ocf::heartbeat:Dummy):\tStopped
+     A4\t(ocf::heartbeat:Dummy):\tStopped
+     A5\t(ocf::heartbeat:Dummy):\tStopped
+ A1\t(ocf::heartbeat:Dummy):\tStopped
+ A3\t(ocf::heartbeat:Dummy):\tStopped
+""")
 
-        o,r = pcs(temp_cib, "resource ungroup AGroup")
+        o,r = pcs(temp_cib, "constraint location AGroup prefers rh7-1")
         assert r == 0
         ac(o,'')
 
+        o,r = pcs(temp_cib, "resource ungroup AGroup A2")
+        assert r == 0
+        ac(o,'')
+
+        o,r = pcs(temp_cib, "constraint")
+        assert r == 0
+        ac(o, """\
+Location Constraints:
+  Resource: AGroup
+    Enabled on: rh7-1 (score:INFINITY)
+Ordering Constraints:
+Colocation Constraints:
+""")
+
+        o,r = pcs(temp_cib, "resource ungroup AGroup")
+        assert r == 0
+        ac(o, 'Removing Constraint - location-AGroup-rh7-1-INFINITY\n')
+
         o,r = pcs(temp_cib, "resource show AGroup")
         assert r == 1
         ac(o,"Error: unable to find resource 'AGroup'\n")
@@ -913,7 +1094,16 @@ monitor interval=20 (A-monitor-interval-20)
 
         o,r = pcs(temp_cib, "resource show")
         assert r == 0
-        ac(o,' A1\t(ocf::heartbeat:Dummy):\tStopped \n A2\t(ocf::heartbeat:Dummy):\tStopped \n A3\t(ocf::heartbeat:Dummy):\tStopped \n A4\t(ocf::heartbeat:Dummy):\tStopped \n A5\t(ocf::heartbeat:Dummy):\tStopped \n Resource Group: Dgroup\n     A6\t(ocf::heartbeat:Dummy):\tStopped \n     A7\t(ocf::heartbeat:Dummy):\tStopped \n')
+        ac(o,"""\
+ A1\t(ocf::heartbeat:Dummy):\tStopped
+ A2\t(ocf::heartbeat:Dummy):\tStopped
+ A3\t(ocf::heartbeat:Dummy):\tStopped
+ A4\t(ocf::heartbeat:Dummy):\tStopped
+ A5\t(ocf::heartbeat:Dummy):\tStopped
+ Resource Group: Dgroup
+     A6\t(ocf::heartbeat:Dummy):\tStopped
+     A7\t(ocf::heartbeat:Dummy):\tStopped
+""")
 
         o,r = pcs(temp_cib, "resource delete A6")
         assert r == 0
@@ -935,194 +1125,202 @@ monitor interval=20 (A-monitor-interval-20)
 
         o,r = pcs(temp_cib, "resource show")
         assert r == 0
-        ac(o,' Resource Group: MyGroup\n     A1\t(ocf::heartbeat:Dummy):\tStopped \n     A2\t(ocf::heartbeat:Dummy):\tStopped \n Resource Group: MyGroup2\n     A3\t(ocf::heartbeat:Dummy):\tStopped \n     A4\t(ocf::heartbeat:Dummy):\tStopped \n     A5\t(ocf::heartbeat:Dummy):\tStopped \n')
+        ac(o,"""\
+ Resource Group: MyGroup
+     A1\t(ocf::heartbeat:Dummy):\tStopped
+     A2\t(ocf::heartbeat:Dummy):\tStopped
+ Resource Group: MyGroup2
+     A3\t(ocf::heartbeat:Dummy):\tStopped
+     A4\t(ocf::heartbeat:Dummy):\tStopped
+     A5\t(ocf::heartbeat:Dummy):\tStopped
+""")
 
         o, r = pcs(temp_cib, "resource create --no-default-ops A6 Dummy")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource create --no-default-ops A7 Dummy")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource group add MyGroup A6 --after A1")
         ac(o, "")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource group add MyGroup A7 --before A1")
         ac(o, "")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource show")
         ac(o, """\
  Resource Group: MyGroup
-     A7\t(ocf::heartbeat:Dummy):\tStopped 
-     A1\t(ocf::heartbeat:Dummy):\tStopped 
-     A6\t(ocf::heartbeat:Dummy):\tStopped 
-     A2\t(ocf::heartbeat:Dummy):\tStopped 
+     A7\t(ocf::heartbeat:Dummy):\tStopped
+     A1\t(ocf::heartbeat:Dummy):\tStopped
+     A6\t(ocf::heartbeat:Dummy):\tStopped
+     A2\t(ocf::heartbeat:Dummy):\tStopped
  Resource Group: MyGroup2
-     A3\t(ocf::heartbeat:Dummy):\tStopped 
-     A4\t(ocf::heartbeat:Dummy):\tStopped 
-     A5\t(ocf::heartbeat:Dummy):\tStopped 
+     A3\t(ocf::heartbeat:Dummy):\tStopped
+     A4\t(ocf::heartbeat:Dummy):\tStopped
+     A5\t(ocf::heartbeat:Dummy):\tStopped
 """)
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource group add MyGroup2 A6 --before A5")
         ac(o, "")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource group add MyGroup2 A7 --after A5")
         ac(o, "")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource show")
         ac(o, """\
  Resource Group: MyGroup
-     A1\t(ocf::heartbeat:Dummy):\tStopped 
-     A2\t(ocf::heartbeat:Dummy):\tStopped 
+     A1\t(ocf::heartbeat:Dummy):\tStopped
+     A2\t(ocf::heartbeat:Dummy):\tStopped
  Resource Group: MyGroup2
-     A3\t(ocf::heartbeat:Dummy):\tStopped 
-     A4\t(ocf::heartbeat:Dummy):\tStopped 
-     A6\t(ocf::heartbeat:Dummy):\tStopped 
-     A5\t(ocf::heartbeat:Dummy):\tStopped 
-     A7\t(ocf::heartbeat:Dummy):\tStopped 
+     A3\t(ocf::heartbeat:Dummy):\tStopped
+     A4\t(ocf::heartbeat:Dummy):\tStopped
+     A6\t(ocf::heartbeat:Dummy):\tStopped
+     A5\t(ocf::heartbeat:Dummy):\tStopped
+     A7\t(ocf::heartbeat:Dummy):\tStopped
 """)
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource group add MyGroup A6 A7 --before A2")
         ac(o, "")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource show")
         ac(o, """\
  Resource Group: MyGroup
-     A1\t(ocf::heartbeat:Dummy):\tStopped 
-     A6\t(ocf::heartbeat:Dummy):\tStopped 
-     A7\t(ocf::heartbeat:Dummy):\tStopped 
-     A2\t(ocf::heartbeat:Dummy):\tStopped 
+     A1\t(ocf::heartbeat:Dummy):\tStopped
+     A6\t(ocf::heartbeat:Dummy):\tStopped
+     A7\t(ocf::heartbeat:Dummy):\tStopped
+     A2\t(ocf::heartbeat:Dummy):\tStopped
  Resource Group: MyGroup2
-     A3\t(ocf::heartbeat:Dummy):\tStopped 
-     A4\t(ocf::heartbeat:Dummy):\tStopped 
-     A5\t(ocf::heartbeat:Dummy):\tStopped 
+     A3\t(ocf::heartbeat:Dummy):\tStopped
+     A4\t(ocf::heartbeat:Dummy):\tStopped
+     A5\t(ocf::heartbeat:Dummy):\tStopped
 """)
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource group add MyGroup2 A6 A7 --after A4")
         ac(o, "")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource show")
         ac(o, """\
  Resource Group: MyGroup
-     A1\t(ocf::heartbeat:Dummy):\tStopped 
-     A2\t(ocf::heartbeat:Dummy):\tStopped 
+     A1\t(ocf::heartbeat:Dummy):\tStopped
+     A2\t(ocf::heartbeat:Dummy):\tStopped
  Resource Group: MyGroup2
-     A3\t(ocf::heartbeat:Dummy):\tStopped 
-     A4\t(ocf::heartbeat:Dummy):\tStopped 
-     A6\t(ocf::heartbeat:Dummy):\tStopped 
-     A7\t(ocf::heartbeat:Dummy):\tStopped 
-     A5\t(ocf::heartbeat:Dummy):\tStopped 
+     A3\t(ocf::heartbeat:Dummy):\tStopped
+     A4\t(ocf::heartbeat:Dummy):\tStopped
+     A6\t(ocf::heartbeat:Dummy):\tStopped
+     A7\t(ocf::heartbeat:Dummy):\tStopped
+     A5\t(ocf::heartbeat:Dummy):\tStopped
 """)
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource group add MyGroup A6 --before A0")
         ac(o, "Error: there is no resource 'A0' in the group 'MyGroup'\n")
-        self.assertEquals(1, r)
+        self.assertEqual(1, r)
 
         o, r = pcs(temp_cib, "resource group add MyGroup A6 --after A0")
         ac(o, "Error: there is no resource 'A0' in the group 'MyGroup'\n")
-        self.assertEquals(1, r)
+        self.assertEqual(1, r)
 
         o, r = pcs(
             temp_cib,
             "resource group add MyGroup A6 --after A1 --before A2"
         )
         ac(o, "Error: you cannot specify both --before and --after\n")
-        self.assertEquals(1, r)
+        self.assertEqual(1, r)
 
         o,r = pcs(
             temp_cib,
             "resource create --no-default-ops A8 Dummy --group MyGroup --before A1"
         )
         ac(o, "")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o,r = pcs(
             temp_cib,
             "resource create --no-default-ops A9 Dummy --group MyGroup --after A1"
         )
         ac(o, "")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource show")
         ac(o, """\
  Resource Group: MyGroup
-     A8\t(ocf::heartbeat:Dummy):\tStopped 
-     A1\t(ocf::heartbeat:Dummy):\tStopped 
-     A9\t(ocf::heartbeat:Dummy):\tStopped 
-     A2\t(ocf::heartbeat:Dummy):\tStopped 
+     A8\t(ocf::heartbeat:Dummy):\tStopped
+     A1\t(ocf::heartbeat:Dummy):\tStopped
+     A9\t(ocf::heartbeat:Dummy):\tStopped
+     A2\t(ocf::heartbeat:Dummy):\tStopped
  Resource Group: MyGroup2
-     A3\t(ocf::heartbeat:Dummy):\tStopped 
-     A4\t(ocf::heartbeat:Dummy):\tStopped 
-     A6\t(ocf::heartbeat:Dummy):\tStopped 
-     A7\t(ocf::heartbeat:Dummy):\tStopped 
-     A5\t(ocf::heartbeat:Dummy):\tStopped 
+     A3\t(ocf::heartbeat:Dummy):\tStopped
+     A4\t(ocf::heartbeat:Dummy):\tStopped
+     A6\t(ocf::heartbeat:Dummy):\tStopped
+     A7\t(ocf::heartbeat:Dummy):\tStopped
+     A5\t(ocf::heartbeat:Dummy):\tStopped
 """)
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource group add MyGroup A1 --before A8")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
         ac(o, "")
 
         o, r = pcs(temp_cib, "resource group add MyGroup2 A3 --after A6")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
         ac(o, "")
 
         o, r = pcs(temp_cib, "resource show")
         ac(o, """\
  Resource Group: MyGroup
-     A1\t(ocf::heartbeat:Dummy):\tStopped 
-     A8\t(ocf::heartbeat:Dummy):\tStopped 
-     A9\t(ocf::heartbeat:Dummy):\tStopped 
-     A2\t(ocf::heartbeat:Dummy):\tStopped 
+     A1\t(ocf::heartbeat:Dummy):\tStopped
+     A8\t(ocf::heartbeat:Dummy):\tStopped
+     A9\t(ocf::heartbeat:Dummy):\tStopped
+     A2\t(ocf::heartbeat:Dummy):\tStopped
  Resource Group: MyGroup2
-     A4\t(ocf::heartbeat:Dummy):\tStopped 
-     A6\t(ocf::heartbeat:Dummy):\tStopped 
-     A3\t(ocf::heartbeat:Dummy):\tStopped 
-     A7\t(ocf::heartbeat:Dummy):\tStopped 
-     A5\t(ocf::heartbeat:Dummy):\tStopped 
+     A4\t(ocf::heartbeat:Dummy):\tStopped
+     A6\t(ocf::heartbeat:Dummy):\tStopped
+     A3\t(ocf::heartbeat:Dummy):\tStopped
+     A7\t(ocf::heartbeat:Dummy):\tStopped
+     A5\t(ocf::heartbeat:Dummy):\tStopped
 """)
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
 
         o, r = pcs(temp_cib, "resource group add MyGroup2 A3 --after A3")
-        self.assertEquals(1, r)
+        self.assertEqual(1, r)
         ac(o, "Error: cannot put resource after itself\n")
 
         o, r = pcs(temp_cib, "resource group add MyGroup2 A3 --before A3")
-        self.assertEquals(1, r)
+        self.assertEqual(1, r)
         ac(o, "Error: cannot put resource before itself\n")
 
         o, r = pcs(temp_cib, "resource group add A7 A6")
         ac(o, "Error: 'A7' is already a resource\n")
-        self.assertEquals(1, r)
+        self.assertEqual(1, r)
 
         o, r = pcs(temp_cib, "resource create --no-default-ops A0 Dummy --clone")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
         ac(o, "")
 
         o, r = pcs(temp_cib, "resource group add A0-clone A6")
         ac(o, "Error: 'A0-clone' is already a clone resource\n")
-        self.assertEquals(1, r)
+        self.assertEqual(1, r)
 
         o, r = pcs(temp_cib, "resource unclone A0-clone")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
         ac(o, "")
 
         o, r = pcs(temp_cib, "resource master A0")
-        self.assertEquals(0, r)
+        self.assertEqual(0, r)
         ac(o, "")
 
         o, r = pcs(temp_cib, "resource group add A0-master A6")
         ac(o, "Error: 'A0-master' is already a master/slave resource\n")
-        self.assertEquals(1, r)
+        self.assertEqual(1, r)
 
         output, returnVal = pcs(temp_large_cib, "resource group add dummyGroup dummy1")
         assert returnVal == 0
@@ -1166,18 +1364,182 @@ Deleting Resource (and group) - dummylarge
 
         output, returnVal = pcs(temp_cib, "resource")
         assert returnVal == 0
-        assert output == ' F\t(ocf::heartbeat:Dummy):\tStopped \n G\t(ocf::heartbeat:Dummy):\tStopped \n H\t(ocf::heartbeat:Dummy):\tStopped \n Resource Group: RGA\n     A\t(ocf::heartbeat:Dummy):\tStopped \n     B\t(ocf::heartbeat:Dummy):\tStopped \n     C\t(ocf::heartbeat:Dummy):\tStopped \n     E\t(ocf::heartbeat:Dummy):\tStopped \n     D\t(ocf::heartbeat:Dummy):\tStopped \n     K\t(ocf::heartbeat:Dummy):\tStopped \n     J\t(ocf::heartbeat:Dummy):\tStopped \n     I\t(ocf::heartbeat:Du [...]
+        ac(output, """\
+ F\t(ocf::heartbeat:Dummy):\tStopped
+ G\t(ocf::heartbeat:Dummy):\tStopped
+ H\t(ocf::heartbeat:Dummy):\tStopped
+ Resource Group: RGA
+     A\t(ocf::heartbeat:Dummy):\tStopped
+     B\t(ocf::heartbeat:Dummy):\tStopped
+     C\t(ocf::heartbeat:Dummy):\tStopped
+     E\t(ocf::heartbeat:Dummy):\tStopped
+     D\t(ocf::heartbeat:Dummy):\tStopped
+     K\t(ocf::heartbeat:Dummy):\tStopped
+     J\t(ocf::heartbeat:Dummy):\tStopped
+     I\t(ocf::heartbeat:Dummy):\tStopped
+""")
 
         output, returnVal = pcs(temp_cib, "resource group list")
+        ac(output, "RGA: A B C E D K J I\n")
         assert returnVal == 0
-        assert output == "RGA: A B C E D K J I \n",[output]
+
+    def testRemoveLastResourceFromGroup(self):
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops d1 Dummy --group gr1"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops d2 Dummy --group gr2"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show")
+        ac(output, """\
+ Resource Group: gr1
+     d1\t(ocf::heartbeat:Dummy):\tStopped
+ Resource Group: gr2
+     d2\t(ocf::heartbeat:Dummy):\tStopped
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource group add gr1 d2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show")
+        ac(output, """\
+ Resource Group: gr1
+     d1\t(ocf::heartbeat:Dummy):\tStopped
+     d2\t(ocf::heartbeat:Dummy):\tStopped
+""")
+        self.assertEqual(0, returnVal)
+
+    def testRemoveLastResourceFromClonedGroup(self):
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops d1 Dummy --group gr1"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops d2 Dummy --group gr2"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource clone gr2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show")
+        ac(output, """\
+ Resource Group: gr1
+     d1\t(ocf::heartbeat:Dummy):\tStopped
+ Clone Set: gr2-clone [gr2]
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource group add gr1 d2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show")
+        ac(output, """\
+ Resource Group: gr1
+     d1\t(ocf::heartbeat:Dummy):\tStopped
+     d2\t(ocf::heartbeat:Dummy):\tStopped
+""")
+        self.assertEqual(0, returnVal)
+
+    def testRemoveLastResourceFromMasteredGroup(self):
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops d1 Dummy --group gr1"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops d2 Dummy --group gr2"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource master gr2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show")
+        ac(output, """\
+ Resource Group: gr1
+     d1\t(ocf::heartbeat:Dummy):\tStopped
+ Master/Slave Set: gr2-master [gr2]
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource group add gr1 d2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show")
+        ac(output, """\
+ Resource Group: gr1
+     d1\t(ocf::heartbeat:Dummy):\tStopped
+     d2\t(ocf::heartbeat:Dummy):\tStopped
+""")
+        self.assertEqual(0, returnVal)
 
     def testClusterConfig(self):
         self.setupClusterA(temp_cib)
 
         output, returnVal = pcs(temp_cib, "config")
         assert returnVal == 0
-        ac (output,'Cluster Name: test99\nCorosync Nodes:\n rh7-1 rh7-2 \nPacemaker Nodes:\n \n\nResources: \n Resource: ClusterIP6 (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=192.168.0.99 cidr_netmask=32 \n  Operations: monitor interval=30s (ClusterIP6-monitor-interval-30s)\n Group: TestGroup1\n  Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)\n   Attributes: ip=192.168.0.99 cidr_netmask=32 \n   Operations: monitor interval=30s (ClusterIP-monitor-interv [...]
+        ac(output, """\
+Cluster Name: test99
+Corosync Nodes:
+ rh7-1 rh7-2
+Pacemaker Nodes:
+
+Resources:
+ Resource: ClusterIP6 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=30s (ClusterIP6-monitor-interval-30s)
+ Group: TestGroup1
+  Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+   Attributes: ip=192.168.0.99 cidr_netmask=32
+   Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
+ Group: TestGroup2
+  Resource: ClusterIP2 (class=ocf provider=heartbeat type=IPaddr2)
+   Attributes: ip=192.168.0.99 cidr_netmask=32
+   Operations: monitor interval=30s (ClusterIP2-monitor-interval-30s)
+  Resource: ClusterIP3 (class=ocf provider=heartbeat type=IPaddr2)
+   Attributes: ip=192.168.0.99 cidr_netmask=32
+   Operations: monitor interval=30s (ClusterIP3-monitor-interval-30s)
+ Clone: ClusterIP4-clone
+  Resource: ClusterIP4 (class=ocf provider=heartbeat type=IPaddr2)
+   Attributes: ip=192.168.0.99 cidr_netmask=32
+   Operations: monitor interval=30s (ClusterIP4-monitor-interval-30s)
+ Master: Master
+  Resource: ClusterIP5 (class=ocf provider=heartbeat type=IPaddr2)
+   Attributes: ip=192.168.0.99 cidr_netmask=32
+   Operations: monitor interval=30s (ClusterIP5-monitor-interval-30s)
+
+Stonith Devices:
+Fencing Levels:
+
+Location Constraints:
+Ordering Constraints:
+Colocation Constraints:
+
+Resources Defaults:
+ No defaults set
+Operations Defaults:
+ No defaults set
+
+Cluster Properties:
+""")
 
     def testCloneRemove(self):
         o,r = pcs("resource create --no-default-ops D1 Dummy --clone")
@@ -1291,7 +1653,52 @@ Deleting Resource - ClusterIP5
 
         output, returnVal = pcs(temp_cib, "config")
         assert returnVal == 0
-        ac(output,'Cluster Name: test99\nCorosync Nodes:\n rh7-1 rh7-2 \nPacemaker Nodes:\n \n\nResources: \n Resource: ClusterIP6 (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=192.168.0.99 cidr_netmask=32 \n  Operations: monitor interval=30s (ClusterIP6-monitor-interval-30s)\n Group: TestGroup1\n  Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)\n   Attributes: ip=192.168.0.99 cidr_netmask=32 \n   Operations: monitor interval=30s (ClusterIP-monitor-interva [...]
+        ac(output, """\
+Cluster Name: test99
+Corosync Nodes:
+ rh7-1 rh7-2
+Pacemaker Nodes:
+
+Resources:
+ Resource: ClusterIP6 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=30s (ClusterIP6-monitor-interval-30s)
+ Group: TestGroup1
+  Resource: ClusterIP (class=ocf provider=heartbeat type=IPaddr2)
+   Attributes: ip=192.168.0.99 cidr_netmask=32
+   Operations: monitor interval=30s (ClusterIP-monitor-interval-30s)
+ Group: TestGroup2
+  Resource: ClusterIP2 (class=ocf provider=heartbeat type=IPaddr2)
+   Attributes: ip=192.168.0.99 cidr_netmask=32
+   Operations: monitor interval=30s (ClusterIP2-monitor-interval-30s)
+  Resource: ClusterIP3 (class=ocf provider=heartbeat type=IPaddr2)
+   Attributes: ip=192.168.0.99 cidr_netmask=32
+   Operations: monitor interval=30s (ClusterIP3-monitor-interval-30s)
+ Clone: ClusterIP4-clone
+  Resource: ClusterIP4 (class=ocf provider=heartbeat type=IPaddr2)
+   Attributes: ip=192.168.0.99 cidr_netmask=32
+   Operations: monitor interval=30s (ClusterIP4-monitor-interval-30s)
+ Resource: ClusterIP5 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=192.168.0.99 cidr_netmask=32
+  Operations: monitor interval=30s (ClusterIP5-monitor-interval-30s)
+
+Stonith Devices:
+Fencing Levels:
+
+Location Constraints:
+  Resource: ClusterIP5
+    Enabled on: rh7-1 (score:INFINITY) (id:location-ClusterIP5-rh7-1-INFINITY)
+    Enabled on: rh7-2 (score:INFINITY) (id:location-ClusterIP5-rh7-2-INFINITY)
+Ordering Constraints:
+Colocation Constraints:
+
+Resources Defaults:
+ No defaults set
+Operations Defaults:
+ No defaults set
+
+Cluster Properties:
+""")
 
         output, returnVal = pcs(temp_large_cib, "resource master dummylarge")
         ac(output, '')
@@ -1422,6 +1829,114 @@ Deleting Resource (and group and M/S) - dummylarge
         assert returnVal == 0
         assert output == ' Group: DGroup\n  Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D0-monitor-interval-60s)\n',[output]
 
+    def testCloneMasterManage(self):
+        # is-managed on the primitive, attempting manage on primitive
+        output, returnVal = pcs(temp_cib, "resource create clone-unmanage Dummy --clone")
+        assert returnVal == 0
+        ac (output,'')
+
+        output, returnVal = pcs(temp_cib, "resource update clone-unmanage meta is-managed=false")
+        assert returnVal == 0
+        ac (output, '')
+
+        output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
+        assert returnVal == 0
+        ac (output, ' Clone: clone-unmanage-clone\n  Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n   Meta Attrs: is-managed=false \n   Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n               stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n               monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
+
+        output, returnVal = pcs(temp_cib, "resource manage clone-unmanage")
+        assert returnVal == 0
+        ac (output, '')
+
+        output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
+        assert returnVal == 0
+        ac (output, ' Clone: clone-unmanage-clone\n  Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n   Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n               stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n               monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
+        output, returnVal = pcs(temp_cib, "resource delete clone-unmanage")
+
+        # is-managed on the clone, attempting manage on primitive
+        output, returnVal = pcs(temp_cib, "resource create clone-unmanage Dummy --clone")
+        ac (output,'')
+        assert returnVal == 0
+
+        output, returnVal = pcs(temp_cib, "resource update clone-unmanage-clone meta is-managed=false")
+        assert returnVal == 0
+        ac (output, '')
+
+        output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
+        assert returnVal == 0
+        ac (output, ' Clone: clone-unmanage-clone\n  Meta Attrs: is-managed=false \n  Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n   Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n               stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n               monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
+
+        output, returnVal = pcs(temp_cib, "resource manage clone-unmanage")
+        assert returnVal == 0
+        ac (output, '')
+
+        output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
+        assert returnVal == 0
+        ac (output, ' Clone: clone-unmanage-clone\n  Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n   Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n               stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n               monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
+        pcs(temp_cib, "resource delete clone-unmanage")
+
+        # is-managed on the primitive, attempting manage on clone
+        output, returnVal = pcs(temp_cib, "resource create clone-unmanage Dummy --clone")
+        assert returnVal == 0
+        ac (output,'')
+
+        output, returnVal = pcs(temp_cib, "resource update clone-unmanage meta is-managed=false")
+        assert returnVal == 0
+        ac (output, '')
+
+        output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
+        assert returnVal == 0
+        ac (output, ' Clone: clone-unmanage-clone\n  Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n   Meta Attrs: is-managed=false \n   Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n               stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n               monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
+
+        output, returnVal = pcs(temp_cib, "resource manage clone-unmanage-clone")
+        assert returnVal == 0
+        ac (output, '')
+
+        output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
+        assert returnVal == 0
+        ac (output, ' Clone: clone-unmanage-clone\n  Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n   Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n               stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n               monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
+        pcs(temp_cib, "resource delete clone-unmanage")
+
+        # is-managed on the clone, attempting manage on clone
+        output, returnVal = pcs(temp_cib, "resource create clone-unmanage Dummy --clone")
+        assert returnVal == 0
+        ac (output,'')
+
+        output, returnVal = pcs(temp_cib, "resource update clone-unmanage-clone meta is-managed=false")
+        assert returnVal == 0
+        ac (output, '')
+
+        output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
+        assert returnVal == 0
+        ac (output, ' Clone: clone-unmanage-clone\n  Meta Attrs: is-managed=false \n  Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n   Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n               stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n               monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
+
+        output, returnVal = pcs(temp_cib, "resource manage clone-unmanage-clone")
+        assert returnVal == 0
+        ac (output, '')
+
+        output, returnVal = pcs(temp_cib, "resource show clone-unmanage-clone")
+        assert returnVal == 0
+        ac (output, ' Clone: clone-unmanage-clone\n  Resource: clone-unmanage (class=ocf provider=heartbeat type=Dummy)\n   Operations: start interval=0s timeout=20 (clone-unmanage-start-interval-0s)\n               stop interval=0s timeout=20 (clone-unmanage-stop-interval-0s)\n               monitor interval=10 timeout=20 (clone-unmanage-monitor-interval-10)\n')
+
+        output, returnVal = pcs(temp_cib, "resource create master-unmanage ocf:pacemaker:Stateful --master --no-default-ops")
+        ac (output,'')
+        assert returnVal == 0
+
+        output, returnVal = pcs(temp_cib, "resource update master-unmanage-master meta is-managed=false")
+        assert returnVal == 0
+        ac (output, '')
+
+        output, returnVal = pcs(temp_cib, "resource show master-unmanage-master")
+        assert returnVal == 0
+        ac (output, ' Master: master-unmanage-master\n  Meta Attrs: is-managed=false \n  Resource: master-unmanage (class=ocf provider=pacemaker type=Stateful)\n   Operations: monitor interval=60s (master-unmanage-monitor-interval-60s)\n')
+
+        output, returnVal = pcs(temp_cib, "resource manage master-unmanage")
+        assert returnVal == 0
+        ac (output, '')
+
+        output, returnVal = pcs(temp_cib, "resource show master-unmanage-master")
+        assert returnVal == 0
+        ac (output, ' Master: master-unmanage-master\n  Resource: master-unmanage (class=ocf provider=pacemaker type=Stateful)\n   Operations: monitor interval=60s (master-unmanage-monitor-interval-60s)\n')
+
     def testGroupManage(self):
         o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --group AG")
         ac(o,"")
@@ -1500,8 +2015,13 @@ Deleting Resource (and group and M/S) - dummylarge
         assert output == "", [output]
 
         output, returnVal = pcs(temp_cib, "resource show D0")
+        ac(output, """\
+ Resource: D0 (class=ocf provider=heartbeat type=Dummy)
+  Attributes: test=testB test2=testC test4=test4A test3=testD
+  Meta Attrs: test7=test7a test6= 
+  Operations: monitor interval=35 (D0-monitor-interval-35)
+""")
         assert returnVal == 0
-        assert output == " Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n  Attributes: test=testB test2=testC test4=test4A test3=testD \n  Meta Attrs: test7=test7a test6= \n  Operations: monitor interval=35 (D0-monitor-interval-35)\n", [output]
 
     def testMetaAttrs(self):
         output, returnVal = pcs(temp_cib, "resource create --no-default-ops --force D0 Dummy test=testA test2=test2a op monitor interval=30 meta test5=test5a test6=test6a")
@@ -1529,8 +2049,19 @@ Deleting Resource (and group and M/S) - dummylarge
         assert output == "", [output]
 
         output, returnVal = pcs(temp_cib, "resource show --full")
+        ac(output, """\
+ Resource: D0 (class=ocf provider=heartbeat type=Dummy)
+  Attributes: test=testC test2=test2a
+  Meta Attrs: test5=test5a test7=test7a 
+  Operations: monitor interval=35 (D0-monitor-interval-35)
+ Group: TestRG
+  Meta Attrs: testrgmeta=mymeta testrgmeta2=mymeta2 
+  Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+   Attributes: test=testA test2=test2a
+   Meta Attrs: d1meta=superd1meta 
+   Operations: monitor interval=30 (D1-monitor-interval-30)
+""")
         assert returnVal == 0
-        assert output == " Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n  Attributes: test=testC test2=test2a \n  Meta Attrs: test5=test5a test7=test7a \n  Operations: monitor interval=35 (D0-monitor-interval-35)\n Group: TestRG\n  Meta Attrs: testrgmeta=mymeta testrgmeta2=mymeta2 \n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Attributes: test=testA test2=test2a \n   Meta Attrs: d1meta=superd1meta \n   Operations: monitor interval=30 (D1-monitor-interval-30)\n" [...]
 
     def testMSGroup(self):
         output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D0 Dummy")
@@ -1582,45 +2113,408 @@ Deleting Resource (and group and M/S) - dummylarge
         ac(o,"")
         assert r == 0
 
-    def testCloneGroupMember(self):
-        o,r = pcs(temp_cib, "resource create --no-default-ops D0 Dummy --group AG")
-        ac(o,"")
-        assert r == 0
+    def testUnclone(self):
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops dummy1 Dummy"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --group AG")
-        ac(o,"")
-        assert r == 0
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops dummy2 Dummy"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
 
-        o,r = pcs(temp_cib, "resource clone D0")
-        ac(o,"")
-        assert r == 0
+        # try to unclone a non-cloned resource
+        output, returnVal = pcs(temp_cib, "resource unclone dummy1")
+        ac(output, "Error: 'dummy1' is not a clone resource\n")
+        self.assertEqual(1, returnVal)
 
-        o,r = pcs(temp_cib, "resource")
-        ac(o," Resource Group: AG\n     D1\t(ocf::heartbeat:Dummy):\tStopped \n Clone Set: D0-clone [D0]\n")
-        assert r == 0
+        output, returnVal = pcs(temp_cib, "resource group add gr dummy1")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
 
-        o,r = pcs(temp_cib, "resource clone D1")
-        ac(o,"")
-        assert r == 0
+        output, returnVal = pcs(temp_cib, "resource unclone gr")
+        ac(output, "Error: 'gr' is not a clone resource\n")
+        self.assertEqual(1, returnVal)
 
-        o,r = pcs(temp_cib, "resource")
-        ac(o," Clone Set: D0-clone [D0]\n Clone Set: D1-clone [D1]\n")
-        assert r == 0
+        # unclone with a cloned primitive specified
+        output, returnVal = pcs(temp_cib, "resource clone dummy2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D2 Dummy --group AG2")
-        ac(o,"")
-        assert r == 0
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Group: gr
+  Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+ Clone: dummy2-clone
+  Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
 
-        o,r = pcs(temp_cib, "resource create --no-default-ops D3 Dummy --group AG2")
-        ac(o,"")
-        assert r == 0
+        output, returnVal = pcs(temp_cib, "resource unclone dummy2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Group: gr
+  Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        # unclone with a clone itself specified
+        output, returnVal = pcs(temp_cib, "resource group add gr dummy2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource clone gr")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Clone: gr-clone
+  Group: gr
+   Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+   Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource unclone gr-clone")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Group: gr
+  Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+  Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        # unclone with a cloned group specified
+        output, returnVal = pcs(temp_cib, "resource clone gr")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Clone: gr-clone
+  Group: gr
+   Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+   Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource unclone gr")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Group: gr
+  Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+  Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        # unclone with a cloned grouped resource specified
+        output, returnVal = pcs(temp_cib, "resource clone gr")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Clone: gr-clone
+  Group: gr
+   Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+   Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource unclone dummy1")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Clone: gr-clone
+  Group: gr
+   Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource unclone dummy2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Resource: dummy1 (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+ Resource: dummy2 (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+    def testUncloneMaster(self):
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops dummy1 ocf:pacemaker:Stateful"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops dummy2 ocf:pacemaker:Stateful"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        # try to unclone a non-cloned resource
+        output, returnVal = pcs(temp_cib, "resource unclone dummy1")
+        ac(output, "Error: 'dummy1' is not a clone resource\n")
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource group add gr dummy1")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource unclone gr")
+        ac(output, "Error: 'gr' is not a clone resource\n")
+        self.assertEqual(1, returnVal)
+
+        # unclone with a cloned primitive specified
+        output, returnVal = pcs(temp_cib, "resource master dummy2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Group: gr
+  Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+ Master: dummy2-master
+  Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource unclone dummy2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Group: gr
+  Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+  Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        # unclone with a clone itself specified
+        output, returnVal = pcs(temp_cib, "resource group add gr dummy2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource master gr")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Master: gr-master
+  Group: gr
+   Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+    Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+   Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+    Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource unclone gr-master")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Group: gr
+  Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+  Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        # unclone with a cloned group specified
+        output, returnVal = pcs(temp_cib, "resource master gr")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Master: gr-master
+  Group: gr
+   Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+    Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+   Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+    Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource unclone gr")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Group: gr
+  Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+  Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        # unclone with a cloned grouped resource specified
+        output, returnVal = pcs(temp_cib, "resource ungroup gr dummy2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource master gr")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+  Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+ Master: gr-master
+  Group: gr
+   Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+    Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource unclone dummy1")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+  Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+ Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+  Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource group add gr dummy1 dummy2")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource master gr")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Master: gr-master
+  Group: gr
+   Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+    Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+   Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+    Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource unclone dummy2")
+        ac(output, "Error: Groups that have more than one resource and are master/slave resources cannot be removed.  The group may be deleted with 'pcs resource delete gr'.\n")
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Master: gr-master
+  Group: gr
+   Resource: dummy1 (class=ocf provider=pacemaker type=Stateful)
+    Operations: monitor interval=60s (dummy1-monitor-interval-60s)
+   Resource: dummy2 (class=ocf provider=pacemaker type=Stateful)
+    Operations: monitor interval=60s (dummy2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+    def testCloneGroupMember(self):
+        o,r = pcs(temp_cib, "resource create --no-default-ops D0 Dummy --group AG")
+        ac(o,"")
+        assert r == 0
+
+        o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --group AG")
+        ac(o,"")
+        assert r == 0
+
+        o,r = pcs(temp_cib, "resource clone D0")
+        ac(o,"")
+        assert r == 0
+
+        o,r = pcs(temp_cib, "resource")
+        ac(o,"""\
+ Resource Group: AG
+     D1\t(ocf::heartbeat:Dummy):\tStopped
+ Clone Set: D0-clone [D0]
+""")
+        assert r == 0
+
+        o,r = pcs(temp_cib, "resource clone D1")
+        ac(o,"")
+        assert r == 0
+
+        o,r = pcs(temp_cib, "resource")
+        ac(o," Clone Set: D0-clone [D0]\n Clone Set: D1-clone [D1]\n")
+        assert r == 0
+
+        o,r = pcs(temp_cib, "resource create --no-default-ops D2 Dummy --group AG2")
+        ac(o,"")
+        assert r == 0
+
+        o,r = pcs(temp_cib, "resource create --no-default-ops D3 Dummy --group AG2")
+        ac(o,"")
+        assert r == 0
 
         o,r = pcs(temp_cib, "resource master D2")
         ac(o,"")
         assert r == 0
 
         o,r = pcs(temp_cib, "resource")
-        ac(o," Clone Set: D0-clone [D0]\n Clone Set: D1-clone [D1]\n Resource Group: AG2\n     D3\t(ocf::heartbeat:Dummy):\tStopped \n Master/Slave Set: D2-master [D2]\n")
+        ac(o,"""\
+ Clone Set: D0-clone [D0]
+ Clone Set: D1-clone [D1]
+ Resource Group: AG2
+     D3\t(ocf::heartbeat:Dummy):\tStopped
+ Master/Slave Set: D2-master [D2]
+""")
         assert r == 0
 
         o,r = pcs(temp_cib, "resource master D3")
@@ -1718,23 +2612,23 @@ Deleting Resource (and group and M/S) - dummylarge
 
         output, returnVal = utils.run(["cibadmin", "-M", '--xml-text', '<nodes><node id="1" uname="rh7-1"><instance_attributes id="nodes-1"/></node><node id="2" uname="rh7-2"><instance_attributes id="nodes-2"/></node></nodes>'])
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib, "resource create --no-default-ops dummy Dummy"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource move dummy")
         ac(output, """\
 Error: You must specify a node when moving/banning a stopped resource
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource move dummy rh7-1")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1744,11 +2638,11 @@ Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource clear dummy")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1758,8 +2652,11 @@ Colocation Constraints:
 """)
 
         output, returnVal = pcs(temp_cib, "resource ban dummy rh7-1")
-        ac(output, "")
-        self.assertEquals(0, returnVal)
+        ac(output, """\
+Warning: Creating location constraint cli-ban-dummy-on-rh7-1 with a score of -INFINITY for resource dummy on node rh7-1.
+This will prevent dummy from running on rh7-1 until the constraint is removed. This will be the case even if rh7-1 is the last node in the cluster.
+""")
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1769,11 +2666,11 @@ Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource clear dummy")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1786,7 +2683,7 @@ Colocation Constraints:
             temp_cib, "resource move dummy rh7-1 lifetime=1H"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         output = re.sub("\d{4}-\d\d-\d\d \d\d:\d\d:\d\dZ", "{datetime}", output)
@@ -1800,11 +2697,11 @@ Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource clear dummy")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1816,8 +2713,11 @@ Colocation Constraints:
         output, returnVal = pcs(
             temp_cib, "resource ban dummy rh7-1 lifetime=P1H"
         )
-        ac(output, "")
-        self.assertEquals(0, returnVal)
+        ac(output, """\
+Warning: Creating location constraint cli-ban-dummy-on-rh7-1 with a score of -INFINITY for resource dummy on node rh7-1.
+This will prevent dummy from running on rh7-1 until the constraint is removed. This will be the case even if rh7-1 is the last node in the cluster.
+""")
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         output = re.sub("\d{4}-\d\d-\d\d \d\d:\d\d:\d\dZ", "{datetime}", output)
@@ -1831,22 +2731,22 @@ Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
 
         output, returnVal = pcs(temp_cib, "resource ban dummy rh7-1 rh7-1")
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib, "resource ban dummy rh7-1 lifetime=1H lifetime=1H"
         )
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource move dummy rh7-1 --master")
         ac(output, """\
 Error: when specifying --master you must use the master id
 """)
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
     def testCloneMoveBanClear(self):
         # Load nodes into cib so move will work
@@ -1854,47 +2754,50 @@ Error: when specifying --master you must use the master id
         utils.filename = temp_cib
         output, returnVal = utils.run(["cibadmin", "-M", '--xml-text', '<nodes><node id="1" uname="rh7-1"><instance_attributes id="nodes-1"/></node><node id="2" uname="rh7-2"><instance_attributes id="nodes-2"/></node></nodes>'])
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib, "resource create --no-default-ops D1 Dummy --clone"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib, "resource create --no-default-ops D2 Dummy --group DG"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource clone DG")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource move D1")
         ac(output, "Error: cannot move cloned resources\n")
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource move D1-clone")
         ac(output, "Error: cannot move cloned resources\n")
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource move D2")
         ac(output, "Error: cannot move cloned resources\n")
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource move DG")
         ac(output, "Error: cannot move cloned resources\n")
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource move DG-clone")
         ac(output, "Error: cannot move cloned resources\n")
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource ban DG-clone rh7-1")
-        ac(output, "")
-        self.assertEquals(0, returnVal)
+        ac(output, """\
+Warning: Creating location constraint cli-ban-DG-clone-on-rh7-1 with a score of -INFINITY for resource DG-clone on node rh7-1.
+This will prevent DG-clone from running on rh7-1 until the constraint is removed. This will be the case even if rh7-1 is the last node in the cluster.
+""")
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1904,11 +2807,11 @@ Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "resource clear DG-clone")
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint --full")
         ac(output, """\
@@ -1916,7 +2819,7 @@ Location Constraints:
 Ordering Constraints:
 Colocation Constraints:
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
     def testNoMoveMSClone(self):
         output, returnVal  = pcs(temp_cib, "resource create --no-default-ops D0 Dummy")
@@ -1956,7 +2859,9 @@ Colocation Constraints:
         assert output == ' Resource: D0 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (D0-monitor-interval-60s)\n Clone: D1-clone\n  Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D1-monitor-interval-60s)\n Master: D2-master\n  Resource: D2 (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (D2-monitor-interval-60s)\n', [output]
 
     def testMasterOfGroupMove(self):
-        o,r = pcs("resource create stateful Stateful --group group1")
+        o,r = pcs(
+            "resource create stateful ocf:pacemaker:Stateful --group group1"
+        )
         ac(o, """\
 Warning: changing a monitor operation interval from 10 to 11 to make the operation unique
 """)
@@ -2039,7 +2944,11 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
 
         o,r = pcs(temp_cib, "resource show")
         assert r == 0
-        ac(o," Resource Group: DGroup\n     D1\t(ocf::heartbeat:Dummy):\tStopped \n     D2\t(ocf::heartbeat:Dummy):\tStopped \n")
+        ac(o,"""\
+ Resource Group: DGroup
+     D1\t(ocf::heartbeat:Dummy):\tStopped
+     D2\t(ocf::heartbeat:Dummy):\tStopped
+""")
 
         o,r = pcs(temp_cib, "resource move DGroup rh7-1")
         ac(o,"")
@@ -2122,6 +3031,10 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         assert returnVal == 1
         assert output == "Error: unable to create resource/fence device 'dlm', 'dlm' already exists on this system\n", [output]
 
+        output,returnVal = pcs(temp_cib, "resource create --no-default-ops dlm-clone ocf:pacemaker:controld op monitor interval=10s clone meta interleave=true clone-node-max=1 ordered=true")
+        assert returnVal == 1
+        assert output == "Error: unable to create resource/fence device 'dlm-clone', 'dlm-clone' already exists on this system\n", [output]
+
         output,returnVal = pcs(temp_cib, "resource --full")
         assert returnVal == 0
         assert output == " Clone: dlm-clone\n  Meta Attrs: interleave=true clone-node-max=1 ordered=true \n  Resource: dlm (class=ocf provider=pacemaker type=controld)\n   Operations: monitor interval=10s (dlm-monitor-interval-10s)\n", [output]
@@ -2134,6 +3047,122 @@ Warning: changing a monitor operation interval from 10 to 11 to make the operati
         ac(output, '')
         assert returnVal == 0
 
+    def testResourceCloneId(self):
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops dummy-clone Dummy"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops dummy Dummy"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource clone dummy")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show --full")
+        ac(output, """\
+ Resource: dummy-clone (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (dummy-clone-monitor-interval-60s)
+ Clone: dummy-clone-1
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource delete dummy")
+        ac(output, "Deleting Resource - dummy\n")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops dummy Dummy --clone"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show --full")
+        ac(output, """\
+ Resource: dummy-clone (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (dummy-clone-monitor-interval-60s)
+ Clone: dummy-clone-1
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+    def testResourceMasterId(self):
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops dummy-master Dummy"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops dummy Dummy"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource master dummy")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show --full")
+        ac(output, """\
+ Resource: dummy-master (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (dummy-master-monitor-interval-60s)
+ Master: dummy-master-1
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource unclone dummy")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource master dummy-master dummy")
+        ac(output, "Error: dummy-master already exists in the cib\n")
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource master dummy-master0 dummy")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show --full")
+        ac(output, """\
+ Resource: dummy-master (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (dummy-master-monitor-interval-60s)
+ Master: dummy-master0
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource delete dummy")
+        ac(output, "Deleting Resource - dummy\n")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops dummy Dummy --master"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show --full")
+        ac(output, """\
+ Resource: dummy-master (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (dummy-master-monitor-interval-60s)
+ Master: dummy-master-1
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
     def testResourceCloneUpdate(self):
         o, r  = pcs(temp_cib, "resource create --no-default-ops D1 Dummy --clone")
         assert r == 0
@@ -2219,6 +3248,30 @@ Deleting Resource (and group and M/S) - A2
         o,r = pcs(temp_cib, "resource master AGMaster AG")
         assert r == 0
 
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops A Dummy"
+        )
+        ac(output, """\
+Error: unable to create resource/fence device 'A', 'A' already exists on this system
+""")
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops AG Dummy"
+        )
+        ac(output, """\
+Error: unable to create resource/fence device 'AG', 'AG' already exists on this system
+""")
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops AGMaster Dummy"
+        )
+        ac(output, """\
+Error: unable to create resource/fence device 'AGMaster', 'AGMaster' already exists on this system
+""")
+        self.assertEqual(1, returnVal)
+
         o,r = pcs(temp_cib, "resource ungroup AG")
         ac(o,"Error: Groups that have more than one resource and are master/slave resources cannot be removed.  The group may be deleted with 'pcs resource delete AG'.\n")
         assert r == 1
@@ -2236,11 +3289,105 @@ Deleting Resource (and group and M/S) - A2
         ac(o," Master: AGMaster\n  Resource: A (class=ocf provider=heartbeat type=Dummy)\n   Operations: monitor interval=60s (A-monitor-interval-60s)\n")
         assert r == 0
 
+    def testClonedGroup(self):
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops D1 Dummy --group DG"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops D2 Dummy --group DG"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource clone DG")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show --full")
+        ac(output, """\
+ Clone: DG-clone
+  Group: DG
+   Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (D1-monitor-interval-60s)
+   Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (D2-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops D1 Dummy"
+        )
+        ac(output, """\
+Error: unable to create resource/fence device 'D1', 'D1' already exists on this system
+""")
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops DG Dummy"
+        )
+        ac(output, """\
+Error: unable to create resource/fence device 'DG', 'DG' already exists on this system
+""")
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib, "resource create --no-default-ops DG-clone Dummy"
+        )
+        ac(output, """\
+Error: unable to create resource/fence device 'DG-clone', 'DG-clone' already exists on this system
+""")
+        self.assertEqual(1, returnVal)
+
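+        # resources can be removed from a cloned group only one at a time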
+        output, returnVal = pcs(temp_cib, "resource ungroup DG")
+        ac(output, """\
+Error: Cannot remove more than one resource from cloned group
+""")
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource ungroup DG D1 D2")
+        ac(output, """\
+Error: Cannot remove more than one resource from cloned group
+""")
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource ungroup DG D1")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show --full")
+        ac(output, """\
+ Clone: DG-clone
+  Group: DG
+   Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (D2-monitor-interval-60s)
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (D1-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource ungroup DG")
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "resource show --full")
+        ac(output, """\
+ Clone: DG-clone
+  Resource: D2 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (D2-monitor-interval-60s)
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (D1-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
     def testResourceEnable(self):
         o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy")
         ac(o,"")
         assert r == 0
 
+        # primitive resource
         o,r = pcs(temp_cib, "resource disable D1")
         ac(o,"")
         assert r == 0
@@ -2257,6 +3404,7 @@ Deleting Resource (and group and M/S) - A2
         ac(o," Resource: D1 (class=ocf provider=heartbeat type=Dummy)\n  Operations: monitor interval=60s (D1-monitor-interval-60s)\n")
         assert r == 0
 
+        # bad resource name
         o,r = pcs(temp_cib, "resource enable NoExist")
         ac(o,"Error: unable to find a resource/clone/master/group: NoExist\n")
         assert r == 1
@@ -2265,9 +3413,30 @@ Deleting Resource (and group and M/S) - A2
         ac(o,"Error: unable to find a resource/clone/master/group: NoExist\n")
         assert r == 1
 
+        # cloned group
+        output, retVal = pcs(temp_cib, "resource create dummy0 Dummy --group group0")
+        ac(output, "")
+        assert retVal == 0
+        output, retVal = pcs(temp_cib, "resource clone group0")
+        ac(output, "")
+        assert retVal == 0
+        output, retVal = pcs(temp_cib, "resource show group0-clone")
+        ac(output," Clone: group0-clone\n  Group: group0\n   Resource: dummy0 (class=ocf provider=heartbeat type=Dummy)\n    Operations: start interval=0s timeout=20 (dummy0-start-interval-0s)\n                stop interval=0s timeout=20 (dummy0-stop-interval-0s)\n                monitor interval=10 timeout=20 (dummy0-monitor-interval-10)\n")
+        assert retVal == 0
+        output, retVal = pcs(temp_cib, "resource disable group0")
+        ac(output, "")
+        assert retVal == 0
+
+    def testResourceEnableUnmanaged(self):
+        o,r = pcs(temp_cib, "resource create --no-default-ops D1 Dummy")
+        ac(o,"")
+        assert r == 0
+
         o,r = pcs(temp_cib, "resource create --no-default-ops D2 Dummy")
         ac(o,"")
         assert r == 0
+
+        # unmanaged resource - by meta attribute
         o,r = pcs(temp_cib, "resource unmanage D2")
         ac(o,"")
         assert r == 0
@@ -2278,6 +3447,7 @@ Deleting Resource (and group and M/S) - A2
         ac(o,"Warning: 'D2' is unmanaged\n")
         assert r == 0
 
+        # unmanaged resource - by cluster property
         o,r = pcs(temp_cib, "property set is-managed-default=false")
         ac(o,"")
         assert r == 0
@@ -2291,6 +3461,7 @@ Deleting Resource (and group and M/S) - A2
         ac(o,"")
         assert r == 0
 
+        # resource in an unmanaged group
         o,r = pcs(temp_cib, "resource create --no-default-ops D3 Dummy")
         ac(o,"")
         assert r == 0
@@ -2316,6 +3487,7 @@ Deleting Resource (and group and M/S) - A2
         ac(o,"")
         assert r == 0
 
+        # unmanaged resource in a group
         o,r = pcs(temp_cib, "resource create --no-default-ops D4 Dummy")
         ac(o,"")
         assert r == 0
@@ -2335,18 +3507,272 @@ Deleting Resource (and group and M/S) - A2
         ac(o,"")
         assert r == 0
 
-        output, retVal = pcs(temp_cib, "resource create dummy0 Dummy --group group0")
+    def testResourceEnableClone(self):
+        output, retVal = pcs(
+            temp_cib, "resource create --no-default-ops dummy Dummy --clone"
+        )
         ac(output, "")
-        assert retVal == 0
-        output, retVal = pcs(temp_cib, "resource clone group0")
+        self.assertEqual(retVal, 0)
+
+        # disable primitive, enable clone
+        output, retVal = pcs(temp_cib, "resource disable dummy")
         ac(output, "")
-        assert retVal == 0
-        output, retVal = pcs(temp_cib, "resource show group0-clone")
-        ac(output," Clone: group0-clone\n  Group: group0\n   Resource: dummy0 (class=ocf provider=heartbeat type=Dummy)\n    Operations: start interval=0s timeout=20 (dummy0-start-interval-0s)\n                stop interval=0s timeout=20 (dummy0-stop-interval-0s)\n                monitor interval=10 timeout=20 (dummy0-monitor-interval-10)\n")
-        assert retVal == 0
-        output, retVal = pcs(temp_cib, "resource disable group0")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource enable dummy-clone")
         ac(output, "")
-        assert retVal == 0
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-clone")
+        ac(output, """\
+ Clone: dummy-clone
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(retVal, 0)
+
+        # disable clone, enable primitive
+        output, retVal = pcs(temp_cib, "resource disable dummy-clone")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource enable dummy")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-clone")
+        ac(output, """\
+ Clone: dummy-clone
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(retVal, 0)
+
+        # disable both primitive and clone, enable clone
+        output, retVal = pcs(temp_cib, "resource disable dummy-clone")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource disable dummy")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource enable dummy-clone")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-clone")
+        ac(output, """\
+ Clone: dummy-clone
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(retVal, 0)
+
+        # disable both primitive and clone, enable primitive
+        output, retVal = pcs(temp_cib, "resource disable dummy-clone")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource disable dummy")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource enable dummy")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-clone")
+        ac(output, """\
+ Clone: dummy-clone
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(retVal, 0)
+
+        # disable via 'resource disable', enable via 'resource meta'
+        output, retVal = pcs(temp_cib, "resource disable dummy-clone")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-clone")
+        ac(output, """\
+ Clone: dummy-clone
+  Meta Attrs: target-role=Stopped 
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+
+        output, retVal = pcs(temp_cib, "resource meta dummy-clone target-role=")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-clone")
+        ac(output, """\
+ Clone: dummy-clone
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+
+        # disable via 'resource meta', enable via 'resource enable'
+        output, retVal = pcs(
+            temp_cib, "resource meta dummy-clone target-role=Stopped"
+        )
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-clone")
+        ac(output, """\
+ Clone: dummy-clone
+  Meta Attrs: target-role=Stopped 
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+
+        output, retVal = pcs(temp_cib, "resource enable dummy-clone")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-clone")
+        ac(output, """\
+ Clone: dummy-clone
+  Resource: dummy (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+
+    def testResourceEnableMaster(self):
+        output, retVal = pcs(
+            temp_cib,
+            "resource create --no-default-ops dummy ocf:pacemaker:Stateful --master"
+        )
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        # disable primitive, enable master
+        output, retVal = pcs(temp_cib, "resource disable dummy")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource enable dummy-master")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-master")
+        ac(output, """\
+ Master: dummy-master
+  Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(retVal, 0)
+
+        # disable master, enable primitive
+        output, retVal = pcs(temp_cib, "resource disable dummy-master")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource enable dummy")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-master")
+        ac(output, """\
+ Master: dummy-master
+  Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(retVal, 0)
+
+        # disable both primitive and master, enable master
+        output, retVal = pcs(temp_cib, "resource disable dummy-master")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource disable dummy")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource enable dummy-master")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-master")
+        ac(output, """\
+ Master: dummy-master
+  Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(retVal, 0)
+
+        # disable both primitive and master, enable primitive
+        output, retVal = pcs(temp_cib, "resource disable dummy-master")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource disable dummy")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource enable dummy")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-master")
+        ac(output, """\
+ Master: dummy-master
+  Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+        self.assertEqual(retVal, 0)
+
+        # disable via 'resource disable', enable via 'resource meta'
+        output, retVal = pcs(temp_cib, "resource disable dummy-master")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-master")
+        ac(output, """\
+ Master: dummy-master
+  Meta Attrs: target-role=Stopped 
+  Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+
+        output, retVal = pcs(temp_cib, "resource meta dummy-master target-role=")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-master")
+        ac(output, """\
+ Master: dummy-master
+  Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+
+        # disable via 'resource meta', enable via 'resource enable'
+        output, retVal = pcs(
+            temp_cib, "resource meta dummy-master target-role=Stopped"
+        )
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-master")
+        ac(output, """\
+ Master: dummy-master
+  Meta Attrs: target-role=Stopped 
+  Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
+
+        output, retVal = pcs(temp_cib, "resource enable dummy-master")
+        ac(output, "")
+        self.assertEqual(retVal, 0)
+
+        output, retVal = pcs(temp_cib, "resource show dummy-master")
+        ac(output, """\
+ Master: dummy-master
+  Resource: dummy (class=ocf provider=pacemaker type=Stateful)
+   Operations: monitor interval=60s (dummy-monitor-interval-60s)
+""")
 
     def testOPOption(self):
         o,r = pcs(temp_cib, "resource create --no-default-ops A Dummy op monitor interval=30s blah=blah")
@@ -2501,7 +3927,21 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         assert r == 0
 
         o,r = pcs("resource --full")
-        ac(o," Resource: myip (class=ocf provider=heartbeat type=IPaddr2)\n  Operations: monitor interval=60s (myip-monitor-interval-60s)\n Resource: myip2 (class=ocf provider=heartbeat type=IPaddr2)\n  Attributes: ip=3.3.3.3 \n  Operations: monitor interval=60s (myip2-monitor-interval-60s)\n Resource: myfs (class=ocf provider=heartbeat type=Filesystem)\n  Operations: monitor interval=60s (myfs-monitor-interval-60s)\n Resource: myfs2 (class=ocf provider=heartbeat type=Filesystem)\n  Attr [...]
+        ac(o, """\
+ Resource: myip (class=ocf provider=heartbeat type=IPaddr2)
+  Operations: monitor interval=60s (myip-monitor-interval-60s)
+ Resource: myip2 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=3.3.3.3
+  Operations: monitor interval=60s (myip2-monitor-interval-60s)
+ Resource: myfs (class=ocf provider=heartbeat type=Filesystem)
+  Operations: monitor interval=60s (myfs-monitor-interval-60s)
+ Resource: myfs2 (class=ocf provider=heartbeat type=Filesystem)
+  Attributes: device=x directory=y
+  Operations: monitor interval=60s (myfs2-monitor-interval-60s)
+ Resource: myfs3 (class=ocf provider=heartbeat type=Filesystem)
+  Attributes: device=x directory=y fstype=z
+  Operations: monitor interval=60s (myfs3-monitor-interval-60s)
+""")
         assert r == 0
 
     def testDefaultOps(self):
@@ -2522,7 +3962,26 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         assert r == 0
 
         o,r = pcs("resource --full")
-        ac(o," Resource: X0 (class=ocf provider=heartbeat type=Dummy)\n  Operations: start interval=0s timeout=20 (X0-start-interval-0s)\n              stop interval=0s timeout=20 (X0-stop-interval-0s)\n              monitor interval=10 timeout=20 (X0-monitor-interval-10)\n Resource: X1 (class=ocf provider=heartbeat type=Dummy)\n  Operations: start interval=0s timeout=20 (X1-start-interval-0s)\n              stop interval=0s timeout=20 (X1-stop-interval-0s)\n              monitor interva [...]
+        ac(o, """\
+ Resource: X0 (class=ocf provider=heartbeat type=Dummy)
+  Operations: start interval=0s timeout=20 (X0-start-interval-0s)
+              stop interval=0s timeout=20 (X0-stop-interval-0s)
+              monitor interval=10 timeout=20 (X0-monitor-interval-10)
+ Resource: X1 (class=ocf provider=heartbeat type=Dummy)
+  Operations: start interval=0s timeout=20 (X1-start-interval-0s)
+              stop interval=0s timeout=20 (X1-stop-interval-0s)
+              monitor interval=90s (X1-monitor-interval-90s)
+ Resource: X2 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=1.1.1.1
+  Operations: start interval=0s timeout=20s (X2-start-interval-0s)
+              stop interval=0s timeout=20s (X2-stop-interval-0s)
+              monitor interval=10s timeout=20s (X2-monitor-interval-10s)
+ Resource: X3 (class=ocf provider=heartbeat type=IPaddr2)
+  Attributes: ip=1.1.1.1
+  Operations: monitor interval=1s (X3-monitor-interval-1s)
+              start interval=0s timeout=1s (X3-start-interval-0s)
+              stop interval=0s timeout=1s (X3-stop-interval-0s)
+""")
         assert r == 0
 
     def testClonedMasteredGroup(self):
@@ -2547,7 +4006,12 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         ac(output, "")
         assert retVal == 0
         output, retVal = pcs(temp_cib, "resource show")
-        ac(output, " Resource Group: dummies\n     dummy1\t(ocf::heartbeat:Dummy):\tStopped \n     dummy2\t(ocf::heartbeat:Dummy):\tStopped \n     dummy3\t(ocf::heartbeat:Dummy):\tStopped \n")
+        ac(output, """\
+ Resource Group: dummies
+     dummy1\t(ocf::heartbeat:Dummy):\tStopped
+     dummy2\t(ocf::heartbeat:Dummy):\tStopped
+     dummy3\t(ocf::heartbeat:Dummy):\tStopped
+""")
         assert retVal == 0
 
         output, retVal = pcs(temp_cib, "resource clone dummies")
@@ -2585,7 +4049,12 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         ac(output, "")
         assert retVal == 0
         output, retVal = pcs(temp_cib, "resource show")
-        ac(output, " Resource Group: dummies\n     dummy1\t(ocf::heartbeat:Dummy):\tStopped \n     dummy2\t(ocf::heartbeat:Dummy):\tStopped \n     dummy3\t(ocf::heartbeat:Dummy):\tStopped \n")
+        ac(output, """\
+ Resource Group: dummies
+     dummy1\t(ocf::heartbeat:Dummy):\tStopped
+     dummy2\t(ocf::heartbeat:Dummy):\tStopped
+     dummy3\t(ocf::heartbeat:Dummy):\tStopped
+""")
         assert retVal == 0
 
         output, retVal = pcs(temp_cib, "resource master dummies")
@@ -2602,6 +4071,320 @@ Error: role must be: Stopped, Started, Slave or Master (use --force to override)
         ac(output, "NO resources configured\n")
         assert retVal == 0
 
+    def test_relocate_stickiness(self):
+        output, retVal = pcs(
+            temp_cib, "resource create D1 dummy --no-default-ops"
+        )
+        self.assertEqual(0, retVal)
+        ac(output, "")
+        output, retVal = pcs(
+            temp_cib, "resource create DG1 dummy --no-default-ops --group GR"
+        )
+        self.assertEqual(0, retVal)
+        ac(output, "")
+        output, retVal = pcs(
+            temp_cib, "resource create DG2 dummy --no-default-ops --group GR"
+        )
+        self.assertEqual(0, retVal)
+        ac(output, "")
+        output, retVal = pcs(
+            temp_cib, "resource create DC dummy --no-default-ops --clone"
+        )
+        self.assertEqual(0, retVal)
+        ac(output, "")
+        output, retVal = pcs(
+            temp_cib, "resource create DGC1 dummy --no-default-ops --group GRC"
+        )
+        self.assertEqual(0, retVal)
+        ac(output, "")
+        output, retVal = pcs(
+            temp_cib, "resource create DGC2 dummy --no-default-ops --group GRC"
+        )
+        self.assertEqual(0, retVal)
+        ac(output, "")
+        output, retVal = pcs(temp_cib, "resource clone GRC")
+        self.assertEqual(0, retVal)
+        ac(output, "")
+
+        status = """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (D1-monitor-interval-60s)
+ Group: GR
+  Resource: DG1 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (DG1-monitor-interval-60s)
+  Resource: DG2 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (DG2-monitor-interval-60s)
+ Clone: DC-clone
+  Resource: DC (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (DC-monitor-interval-60s)
+ Clone: GRC-clone
+  Group: GRC
+   Resource: DGC1 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (DGC1-monitor-interval-60s)
+   Resource: DGC2 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (DGC2-monitor-interval-60s)
+"""
+        cib_original, retVal = pcs(temp_cib, "cluster cib")
+        self.assertEqual(0, retVal)
+
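+        # no resources specified - stickiness=0 is set on every resource, group and clone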
+        resources = set([
+            "D1", "DG1", "DG2", "GR", "DC", "DC-clone", "DGC1", "DGC2", "GRC",
+            "GRC-clone"
+        ])
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, status)
+        self.assertEqual(0, retVal)
+        cib_in = utils.parseString(cib_original)
+        cib_out, updated_resources = resource.resource_relocate_set_stickiness(
+            cib_in
+        )
+        self.assertFalse(cib_in is cib_out)
+        self.assertEqual(resources, updated_resources)
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, status)
+        self.assertEqual(0, retVal)
+        with open(temp_cib, "w") as f:
+            f.write(cib_out.toxml())
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+  Meta Attrs: resource-stickiness=0 
+  Operations: monitor interval=60s (D1-monitor-interval-60s)
+ Group: GR
+  Meta Attrs: resource-stickiness=0 
+  Resource: DG1 (class=ocf provider=heartbeat type=Dummy)
+   Meta Attrs: resource-stickiness=0 
+   Operations: monitor interval=60s (DG1-monitor-interval-60s)
+  Resource: DG2 (class=ocf provider=heartbeat type=Dummy)
+   Meta Attrs: resource-stickiness=0 
+   Operations: monitor interval=60s (DG2-monitor-interval-60s)
+ Clone: DC-clone
+  Meta Attrs: resource-stickiness=0 
+  Resource: DC (class=ocf provider=heartbeat type=Dummy)
+   Meta Attrs: resource-stickiness=0 
+   Operations: monitor interval=60s (DC-monitor-interval-60s)
+ Clone: GRC-clone
+  Meta Attrs: resource-stickiness=0 
+  Group: GRC
+   Meta Attrs: resource-stickiness=0 
+   Resource: DGC1 (class=ocf provider=heartbeat type=Dummy)
+    Meta Attrs: resource-stickiness=0 
+    Operations: monitor interval=60s (DGC1-monitor-interval-60s)
+   Resource: DGC2 (class=ocf provider=heartbeat type=Dummy)
+    Meta Attrs: resource-stickiness=0 
+    Operations: monitor interval=60s (DGC2-monitor-interval-60s)
+""")
+        self.assertEqual(0, retVal)
+
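+        # only the explicitly listed resources get stickiness=0, their parents are untouched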
+        resources = set(["D1", "DG1", "DC", "DGC1"])
+        with open(temp_cib, "w") as f:
+            f.write(cib_original)
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, status)
+        self.assertEqual(0, retVal)
+        cib_in = utils.parseString(cib_original)
+        cib_out, updated_resources = resource.resource_relocate_set_stickiness(
+            cib_in, resources
+        )
+        self.assertFalse(cib_in is cib_out)
+        self.assertEqual(resources, updated_resources)
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, status)
+        self.assertEqual(0, retVal)
+        with open(temp_cib, "w") as f:
+            f.write(cib_out.toxml())
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+  Meta Attrs: resource-stickiness=0 
+  Operations: monitor interval=60s (D1-monitor-interval-60s)
+ Group: GR
+  Resource: DG1 (class=ocf provider=heartbeat type=Dummy)
+   Meta Attrs: resource-stickiness=0 
+   Operations: monitor interval=60s (DG1-monitor-interval-60s)
+  Resource: DG2 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (DG2-monitor-interval-60s)
+ Clone: DC-clone
+  Resource: DC (class=ocf provider=heartbeat type=Dummy)
+   Meta Attrs: resource-stickiness=0 
+   Operations: monitor interval=60s (DC-monitor-interval-60s)
+ Clone: GRC-clone
+  Group: GRC
+   Resource: DGC1 (class=ocf provider=heartbeat type=Dummy)
+    Meta Attrs: resource-stickiness=0 
+    Operations: monitor interval=60s (DGC1-monitor-interval-60s)
+   Resource: DGC2 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (DGC2-monitor-interval-60s)
+""")
+        self.assertEqual(0, retVal)
+
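+        # specifying a clone sets stickiness=0 on the clone, its group and all group members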
+        resources = set(["GRC-clone", "GRC", "DGC1", "DGC2"])
+        with open(temp_cib, "w") as f:
+            f.write(cib_original)
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, status)
+        self.assertEqual(0, retVal)
+        cib_in = utils.parseString(cib_original)
+        cib_out, updated_resources = resource.resource_relocate_set_stickiness(
+            cib_in, ["GRC-clone"]
+        )
+        self.assertFalse(cib_in is cib_out)
+        self.assertEqual(resources, updated_resources)
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, status)
+        self.assertEqual(0, retVal)
+        with open(temp_cib, "w") as f:
+            f.write(cib_out.toxml())
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (D1-monitor-interval-60s)
+ Group: GR
+  Resource: DG1 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (DG1-monitor-interval-60s)
+  Resource: DG2 (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (DG2-monitor-interval-60s)
+ Clone: DC-clone
+  Resource: DC (class=ocf provider=heartbeat type=Dummy)
+   Operations: monitor interval=60s (DC-monitor-interval-60s)
+ Clone: GRC-clone
+  Meta Attrs: resource-stickiness=0 
+  Group: GRC
+   Meta Attrs: resource-stickiness=0 
+   Resource: DGC1 (class=ocf provider=heartbeat type=Dummy)
+    Meta Attrs: resource-stickiness=0 
+    Operations: monitor interval=60s (DGC1-monitor-interval-60s)
+   Resource: DGC2 (class=ocf provider=heartbeat type=Dummy)
+    Meta Attrs: resource-stickiness=0 
+    Operations: monitor interval=60s (DGC2-monitor-interval-60s)
+""")
+        self.assertEqual(0, retVal)
+
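+        # specifying a group and a clone sets stickiness=0 on them and on their members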
+        resources = set(["GR", "DG1", "DG2", "DC-clone", "DC"])
+        with open(temp_cib, "w") as f:
+            f.write(cib_original)
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, status)
+        self.assertEqual(0, retVal)
+        cib_in = utils.parseString(cib_original)
+        cib_out, updated_resources = resource.resource_relocate_set_stickiness(
+            cib_in, ["GR", "DC-clone"]
+        )
+        self.assertFalse(cib_in is cib_out)
+        self.assertEqual(resources, updated_resources)
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, status)
+        self.assertEqual(0, retVal)
+        with open(temp_cib, "w") as f:
+            f.write(cib_out.toxml())
+        output, retVal = pcs(temp_cib, "resource --full")
+        ac(output, """\
+ Resource: D1 (class=ocf provider=heartbeat type=Dummy)
+  Operations: monitor interval=60s (D1-monitor-interval-60s)
+ Group: GR
+  Meta Attrs: resource-stickiness=0 
+  Resource: DG1 (class=ocf provider=heartbeat type=Dummy)
+   Meta Attrs: resource-stickiness=0 
+   Operations: monitor interval=60s (DG1-monitor-interval-60s)
+  Resource: DG2 (class=ocf provider=heartbeat type=Dummy)
+   Meta Attrs: resource-stickiness=0 
+   Operations: monitor interval=60s (DG2-monitor-interval-60s)
+ Clone: DC-clone
+  Meta Attrs: resource-stickiness=0 
+  Resource: DC (class=ocf provider=heartbeat type=Dummy)
+   Meta Attrs: resource-stickiness=0 
+   Operations: monitor interval=60s (DC-monitor-interval-60s)
+ Clone: GRC-clone
+  Group: GRC
+   Resource: DGC1 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (DGC1-monitor-interval-60s)
+   Resource: DGC2 (class=ocf provider=heartbeat type=Dummy)
+    Operations: monitor interval=60s (DGC2-monitor-interval-60s)
+""")
+        self.assertEqual(0, retVal)
+
+    def testResourceUtilizationSet(self):
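+        # temp_large_cib already contains the resources 'dummy' and 'dummy1'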
+        output, returnVal = pcs(
+            temp_large_cib, "resource utilization dummy test1=10"
+        )
+        ac("", output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_large_cib, "resource utilization dummy1")
+        expected_out = """\
+Resource Utilization:
+ dummy1: \n"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_large_cib, "resource utilization dummy")
+        expected_out = """\
+Resource Utilization:
+ dummy: test1=10
+"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_large_cib, "resource utilization dummy test1=-10 test4=1234"
+        )
+        ac("", output)
+        self.assertEqual(0, returnVal)
+        output, returnVal = pcs(temp_large_cib, "resource utilization dummy")
+        expected_out = """\
+Resource Utilization:
+ dummy: test1=-10 test4=1234
+"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_large_cib, "resource utilization dummy1 test2=321 empty="
+        )
+        ac("", output)
+        self.assertEqual(0, returnVal)
+        output, returnVal = pcs(temp_large_cib, "resource utilization dummy1")
+        expected_out = """\
+Resource Utilization:
+ dummy1: test2=321
+"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_large_cib, "resource utilization")
+        expected_out = """\
+Resource Utilization:
+ dummy: test1=-10 test4=1234
+ dummy1: test2=321
+"""
+        ac(expected_out, output)
+        self.assertEqual(0, returnVal)
+
+    def test_resource_utilization_set_invalid(self):
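+        # utilization cannot be shown or set for a nonexistent resource; values must be integers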
+        output, returnVal = pcs(temp_large_cib, "resource utilization dummy0")
+        expected_out = """\
+Error: Unable to find a resource: dummy0
+"""
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(
+            temp_large_cib, "resource utilization dummy0 test=10"
+        )
+        expected_out = """\
+Error: Unable to find a resource: dummy0
+"""
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
+
+        output, returnVal = pcs(
+            temp_large_cib, "resource utilization dummy1 test1=10 test=int"
+        )
+        expected_out = """\
+Error: Value of utilization attribute must be integer: 'test=int'
+"""
+        ac(expected_out, output)
+        self.assertEqual(1, returnVal)
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/pcs/test/test_rule.py b/pcs/test/test_rule.py
index b23632a..17450e5 100644
--- a/pcs/test/test_rule.py
+++ b/pcs/test/test_rule.py
@@ -1,13 +1,20 @@
-import os
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os.path
 import sys
 import shutil
 import unittest
 import xml.dom.minidom
 parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
 sys.path.insert(0, parentdir)
-from pcs_test_functions import pcs,ac
+
+from pcs_test_functions import pcs, ac
 import rule
 
+
 empty_cib = "empty.xml"
 temp_cib = "temp.xml"
 
@@ -15,7 +22,7 @@ class DateValueTest(unittest.TestCase):
 
     def testParse(self):
         for value, item in enumerate(rule.DateCommonValue.allowed_items, 1):
-            self.assertEquals(
+            self.assertEqual(
                 str(value),
                 rule.DateCommonValue("%s=%s" % (item, value)).parts[item]
             )
@@ -24,23 +31,23 @@ class DateValueTest(unittest.TestCase):
             "hours=1 monthdays=2 weekdays=3 yeardays=4 months=5 weeks=6 "
             "years=7 weekyears=8 moon=9"
         )
-        self.assertEquals("1", value.parts["hours"])
-        self.assertEquals("2", value.parts["monthdays"])
-        self.assertEquals("3", value.parts["weekdays"])
-        self.assertEquals("4", value.parts["yeardays"])
-        self.assertEquals("5", value.parts["months"])
-        self.assertEquals("6", value.parts["weeks"])
-        self.assertEquals("7", value.parts["years"])
-        self.assertEquals("8", value.parts["weekyears"])
-        self.assertEquals("9", value.parts["moon"])
+        self.assertEqual("1", value.parts["hours"])
+        self.assertEqual("2", value.parts["monthdays"])
+        self.assertEqual("3", value.parts["weekdays"])
+        self.assertEqual("4", value.parts["yeardays"])
+        self.assertEqual("5", value.parts["months"])
+        self.assertEqual("6", value.parts["weeks"])
+        self.assertEqual("7", value.parts["years"])
+        self.assertEqual("8", value.parts["weekyears"])
+        self.assertEqual("9", value.parts["moon"])
 
         value = rule.DateCommonValue("hours=1 monthdays=2 hours=3")
-        self.assertEquals("2", value.parts["monthdays"])
-        self.assertEquals("3", value.parts["hours"])
+        self.assertEqual("2", value.parts["monthdays"])
+        self.assertEqual("3", value.parts["hours"])
 
         value = rule.DateCommonValue(" hours=1   monthdays=2   hours=3 ")
-        self.assertEquals("2", value.parts["monthdays"])
-        self.assertEquals("3", value.parts["hours"])
+        self.assertEqual("2", value.parts["monthdays"])
+        self.assertEqual("3", value.parts["hours"])
 
         self.assertSyntaxError(
             "missing one of 'hours=', 'monthdays=', 'weekdays=', 'yeardays=', "
@@ -71,7 +78,7 @@ class DateValueTest(unittest.TestCase):
 
     def testDurationValidate(self):
         for value, item in enumerate(rule.DateCommonValue.allowed_items, 1):
-            self.assertEquals(
+            self.assertEqual(
                 str(value),
                 rule.DateDurationValue("%s=%s" % (item, value)).parts[item]
             )
@@ -95,17 +102,17 @@ class DateValueTest(unittest.TestCase):
     def testDateSpecValidation(self):
         for item in rule.DateCommonValue.allowed_items:
             value = 1
-            self.assertEquals(
+            self.assertEqual(
                 str(value),
                 rule.DateSpecValue("%s=%s" % (item, value)).parts[item]
             )
-            self.assertEquals(
+            self.assertEqual(
                 "%s-%s" % (value, value + 1),
                 rule.DateSpecValue(
                     "%s=%s-%s" % (item, value, value + 1)
                 ).parts[item]
             )
-        self.assertEquals(
+        self.assertEqual(
             "hours=9-16 weekdays=1-5",
             str(rule.DateSpecValue("hours=9-16 weekdays=1-5"))
         )
@@ -187,7 +194,7 @@ class DateValueTest(unittest.TestCase):
         try:
             value_class(parts_string)
         except rule.SyntaxError as e:
-            self.assertEquals(syntax_error, str(e))
+            self.assertEqual(syntax_error, str(e))
 
 
 class ParserTest(unittest.TestCase):
@@ -211,12 +218,12 @@ class ParserTest(unittest.TestCase):
         )
 
     def testSingleLiteralDatespec(self):
-        self.assertEquals(
+        self.assertEqual(
             "(date-spec (literal hours=1))",
             str(self.parser.parse(["date-spec", "hours=1"]))
         )
-        self.assertEquals(
-            "(date-spec (literal hours=1-14 months=1 monthdays=20-30))",
+        self.assertEqual(
+            "(date-spec (literal hours=1-14 monthdays=20-30 months=1))",
             str(self.parser.parse([
                 "date-spec", "hours=1-14 months=1 monthdays=20-30"
             ]))
@@ -224,27 +231,27 @@ class ParserTest(unittest.TestCase):
         self.assertUnexpectedEndOfInput(["date-spec"])
 
     def testSimpleExpression(self):
-        self.assertEquals(
+        self.assertEqual(
             "(eq (literal #uname) (literal node1))",
             str(self.parser.parse(["#uname", "eq", "node1"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(ne (literal #uname) (literal node2))",
             str(self.parser.parse(["#uname", "ne", "node2"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(gt (literal int) (literal 123))",
             str(self.parser.parse(["int", "gt", "123"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(gte (literal int) (literal 123))",
             str(self.parser.parse(["int", "gte", "123"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(lt (literal int) (literal 123))",
             str(self.parser.parse(["int", "lt", "123"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(lte (literal int) (literal 123))",
             str(self.parser.parse(["int", "lte", "123"]))
         )
@@ -289,11 +296,11 @@ class ParserTest(unittest.TestCase):
         )
 
     def testDefinedExpression(self):
-        self.assertEquals(
+        self.assertEqual(
             "(defined (literal pingd))",
             str(self.parser.parse(["defined", "pingd"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(not_defined (literal pingd))",
             str(self.parser.parse(["not_defined", "pingd"]))
         )
@@ -323,35 +330,35 @@ class ParserTest(unittest.TestCase):
         )
 
     def testTypeExpression(self):
-        self.assertEquals(
+        self.assertEqual(
             "(eq (literal #uname) (string (literal node1)))",
             str(self.parser.parse(["#uname", "eq", "string", "node1"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(eq (literal #uname) (integer (literal 12345)))",
             str(self.parser.parse(["#uname", "eq", "integer", "12345"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(eq (literal #uname) (integer (literal -12345)))",
             str(self.parser.parse(["#uname", "eq", "integer", "-12345"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(eq (literal #uname) (version (literal 1)))",
             str(self.parser.parse(["#uname", "eq", "version", "1"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(eq (literal #uname) (version (literal 1.2.3)))",
             str(self.parser.parse(["#uname", "eq", "version", "1.2.3"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(eq (literal #uname) (string (literal string)))",
             str(self.parser.parse(["#uname", "eq", "string", "string"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(eq (literal #uname) (string (literal and)))",
             str(self.parser.parse(["#uname", "eq", "string", "and"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(ne (literal #uname) (string (literal integer))) "
                 "(ne (literal #uname) (string (literal version)))"
@@ -380,15 +387,15 @@ class ParserTest(unittest.TestCase):
         )
 
     def testDateExpression(self):
-        self.assertEquals(
+        self.assertEqual(
             "(gt (literal date) (literal 2014-06-26))",
             str(self.parser.parse(["date", "gt", "2014-06-26"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(lt (literal date) (literal 2014-06-26))",
             str(self.parser.parse(["date", "lt", "2014-06-26"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(in_range "
                 "(literal date) (literal 2014-06-26) (literal 2014-07-26)"
             ")",
@@ -396,7 +403,7 @@ class ParserTest(unittest.TestCase):
                 "date", "in_range", "2014-06-26", "to", "2014-07-26"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(in_range "
                 "(literal date) "
                 "(literal 2014-06-26) (duration (literal years=1))"
@@ -467,7 +474,7 @@ class ParserTest(unittest.TestCase):
         )
 
     def testAndOrExpression(self):
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(ne (literal #uname) (literal node1)) "
                 "(ne (literal #uname) (literal node2))"
@@ -476,7 +483,7 @@ class ParserTest(unittest.TestCase):
                 "#uname", "ne", "node1", "and", "#uname", "ne", "node2"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(or "
                 "(eq (literal #uname) (literal node1)) "
                 "(eq (literal #uname) (literal node2))"
@@ -485,7 +492,7 @@ class ParserTest(unittest.TestCase):
                 "#uname", "eq", "node1", "or", "#uname", "eq", "node2"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(and "
                     "(ne (literal #uname) (literal node1)) "
@@ -499,7 +506,7 @@ class ParserTest(unittest.TestCase):
                 "and", "#uname", "ne", "node3"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(or "
                 "(and "
                     "(ne (literal #uname) (literal node1)) "
@@ -513,7 +520,7 @@ class ParserTest(unittest.TestCase):
                 "or", "#uname", "eq", "node3"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(or "
                     "(eq (literal #uname) (literal node1)) "
@@ -527,7 +534,7 @@ class ParserTest(unittest.TestCase):
                 "and", "#uname", "ne", "node3"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(defined (literal pingd)) "
                 "(lte (literal pingd) (literal 1))"
@@ -536,7 +543,7 @@ class ParserTest(unittest.TestCase):
                 "defined", "pingd", "and", "pingd", "lte", "1"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(or "
                 "(gt (literal pingd) (literal 1)) "
                 "(not_defined (literal pingd))"
@@ -547,7 +554,7 @@ class ParserTest(unittest.TestCase):
         )
 
     def testAndOrExpressionDateSpec(self):
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(ne (literal #uname) (literal node1)) "
                 "(date-spec (literal hours=1-12))"
@@ -556,7 +563,7 @@ class ParserTest(unittest.TestCase):
                 "#uname", "ne", "node1", "and", "date-spec", "hours=1-12"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(or "
                 "(date-spec (literal monthdays=1-12)) "
                 "(ne (literal #uname) (literal node1))"
@@ -565,7 +572,7 @@ class ParserTest(unittest.TestCase):
                 "date-spec", "monthdays=1-12", "or", "#uname", "ne", "node1"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(or "
                 "(date-spec (literal monthdays=1-10)) "
                 "(date-spec (literal monthdays=11-20))"
@@ -578,7 +585,7 @@ class ParserTest(unittest.TestCase):
         )
 
     def testAndOrExpressionDate(self):
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(ne (literal #uname) (literal node1)) "
                 "(in_range "
@@ -591,7 +598,7 @@ class ParserTest(unittest.TestCase):
                 "date", "in_range", "2014-06-26", "to", "2014-07-26"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(in_range "
                     "(literal date) (literal 2014-06-26) (literal 2014-07-26)"
@@ -653,19 +660,19 @@ class ParserTest(unittest.TestCase):
             "'defined', 'not_defined', 'date-spec'",
             ["(", "#uname", ")"]
         )
-        self.assertEquals(
+        self.assertEqual(
             "(date-spec (literal hours=1))",
             str(self.parser.parse(["(", "date-spec", "hours=1", ")"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(eq (literal #uname) (literal node1))",
             str(self.parser.parse(["(", "#uname", "eq", "node1", ")"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(defined (literal pingd))",
             str(self.parser.parse(["(", "defined", "pingd", ")"]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(ne (literal #uname) (literal node1)) "
                 "(ne (literal #uname) (literal node2))"
@@ -676,7 +683,7 @@ class ParserTest(unittest.TestCase):
                 ")"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(ne (literal #uname) (literal node1)) "
                 "(ne (literal #uname) (literal node2))"
@@ -687,7 +694,7 @@ class ParserTest(unittest.TestCase):
                 "(", "#uname", "ne", "node2", ")"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(or "
                 "(and "
                     "(ne (literal #uname) (literal node1)) "
@@ -702,7 +709,7 @@ class ParserTest(unittest.TestCase):
                 "or", "#uname", "eq", "node3"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(ne (literal #uname) (literal node1)) "
                 "(or "
@@ -716,7 +723,7 @@ class ParserTest(unittest.TestCase):
                 "(", "#uname", "ne", "node2", "or", "#uname", "eq", "node3", ")"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(and "
                 "(ne (literal #uname) (literal node1)) "
                 "(or "
@@ -736,7 +743,7 @@ class ParserTest(unittest.TestCase):
                 ")", ")"
             ]))
         )
-        self.assertEquals(
+        self.assertEqual(
             "(in_range "
                 "(literal date) (literal 2014-06-26) (literal 2014-07-26)"
             ")",
@@ -771,7 +778,7 @@ class ParserTest(unittest.TestCase):
         try:
             self.parser.parse(program)
         except rule.SyntaxError as e:
-            self.assertEquals(syntax_error, str(e))
+            self.assertEqual(syntax_error, str(e))
 
 
 class CibBuilderTest(unittest.TestCase):
@@ -1256,36 +1263,36 @@ class TokenPreprocessorTest(unittest.TestCase):
         self.preprocessor = rule.TokenPreprocessor()
 
     def testNoChanges(self):
-        self.assertEquals([], self.preprocessor.run([]))
-        self.assertEquals(
+        self.assertEqual([], self.preprocessor.run([]))
+        self.assertEqual(
             ["#uname", "eq", "node1"],
             self.preprocessor.run(["#uname", "eq", "node1"])
         )
 
     def testDateSpec(self):
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec"],
             self.preprocessor.run(["date-spec"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "hours=14"],
             self.preprocessor.run(["date-spec", "hours=14"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "hours weeks=6 months= moon=1"],
             self.preprocessor.run(
                 ["date-spec", "hours", "weeks=6", "months=", "moon=1"]
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "foo", "hours=14"],
             self.preprocessor.run(["date-spec", "foo", "hours=14"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "hours=14", "foo", "hours=14"],
             self.preprocessor.run(["date-spec", "hours=14", "foo", "hours=14"])
         )
-        self.assertEquals(
+        self.assertEqual(
             [
                 "date-spec",
                 "hours=1 monthdays=2 weekdays=3 yeardays=4 months=5 "
@@ -1297,13 +1304,13 @@ class TokenPreprocessorTest(unittest.TestCase):
                 "months=5","weeks=6", "years=7", "weekyears=8", "moon=9"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["#uname", "eq", "node1", "or", "date-spec", "hours=14"],
             self.preprocessor.run([
                 "#uname", "eq", "node1", "or", "date-spec", "hours=14"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "hours=14", "or", "#uname", "eq", "node1"],
             self.preprocessor.run([
                 "date-spec", "hours=14", "or", "#uname", "eq", "node1",
@@ -1311,29 +1318,29 @@ class TokenPreprocessorTest(unittest.TestCase):
         )
 
     def testDuration(self):
-        self.assertEquals(
+        self.assertEqual(
             ["duration"],
             self.preprocessor.run(["duration"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["duration", "hours=14"],
             self.preprocessor.run(["duration", "hours=14"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["duration", "hours weeks=6 months= moon=1"],
             self.preprocessor.run(
                 ["duration", "hours", "weeks=6", "months=", "moon=1"]
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             ["duration", "foo", "hours=14"],
             self.preprocessor.run(["duration", "foo", "hours=14"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["duration", "hours=14", "foo", "hours=14"],
             self.preprocessor.run(["duration", "hours=14", "foo", "hours=14"])
         )
-        self.assertEquals(
+        self.assertEqual(
             [
                 "duration",
                 "hours=1 monthdays=2 weekdays=3 yeardays=4 months=5 "
@@ -1345,13 +1352,13 @@ class TokenPreprocessorTest(unittest.TestCase):
                 "months=5","weeks=6", "years=7", "weekyears=8", "moon=9"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["#uname", "eq", "node1", "or", "duration", "hours=14"],
             self.preprocessor.run([
                 "#uname", "eq", "node1", "or", "duration", "hours=14"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["duration", "hours=14", "or", "#uname", "eq", "node1"],
             self.preprocessor.run([
                 "duration", "hours=14", "or", "#uname", "eq", "node1",
@@ -1359,49 +1366,49 @@ class TokenPreprocessorTest(unittest.TestCase):
         )
 
     def testOperationDatespec(self):
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "weeks=6 moon=1"],
             self.preprocessor.run(
                 ["date-spec", "operation=date_spec", "weeks=6", "moon=1"]
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "weeks=6 moon=1"],
             self.preprocessor.run(
                 ["date-spec", "weeks=6", "operation=date_spec", "moon=1"]
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "weeks=6", "foo", "moon=1"],
             self.preprocessor.run(
                 ["date-spec", "weeks=6", "operation=date_spec", "foo", "moon=1"]
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "weeks=6", "foo", "operation=date_spec", "moon=1"],
             self.preprocessor.run(
                 ["date-spec", "weeks=6", "foo", "operation=date_spec", "moon=1"]
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "weeks=6 moon=1"],
             self.preprocessor.run(
                 ["date-spec", "weeks=6", "moon=1", "operation=date_spec"]
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "weeks=6 moon=1", "foo"],
             self.preprocessor.run(
                 ["date-spec", "weeks=6", "moon=1", "operation=date_spec", "foo"]
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec"],
             self.preprocessor.run(
                 ["date-spec", "operation=date_spec"]
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date-spec", "weeks=6", "operation=foo", "moon=1"],
             self.preprocessor.run(
                 ["date-spec", "weeks=6", "operation=foo", "moon=1"]
@@ -1410,50 +1417,50 @@ class TokenPreprocessorTest(unittest.TestCase):
 
     def testDateLegacySyntax(self):
         # valid syntax
-        self.assertEquals(
+        self.assertEqual(
             ["date", "gt", "2014-06-26"],
             self.preprocessor.run([
                 "date", "start=2014-06-26", "gt"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "lt", "2014-06-26"],
             self.preprocessor.run([
                 "date", "end=2014-06-26", "lt"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "in_range", "2014-06-26", "to", "2014-07-26"],
             self.preprocessor.run([
                 "date", "start=2014-06-26", "end=2014-07-26", "in_range"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "in_range", "2014-06-26", "to", "2014-07-26"],
             self.preprocessor.run([
                 "date", "end=2014-07-26", "start=2014-06-26", "in_range"
             ])
         )
 
-        self.assertEquals(
+        self.assertEqual(
             ["date", "gt", "2014-06-26", "foo"],
             self.preprocessor.run([
                 "date", "start=2014-06-26", "gt", "foo"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "lt", "2014-06-26", "foo"],
             self.preprocessor.run([
                 "date", "end=2014-06-26", "lt", "foo"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "in_range", "2014-06-26", "to", "2014-07-26", "foo"],
             self.preprocessor.run([
                 "date", "start=2014-06-26", "end=2014-07-26", "in_range", "foo"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "in_range", "2014-06-26", "to", "2014-07-26", "foo"],
             self.preprocessor.run([
                 "date", "end=2014-07-26", "start=2014-06-26", "in_range", "foo"
@@ -1461,73 +1468,73 @@ class TokenPreprocessorTest(unittest.TestCase):
         )
 
         # invalid syntax - no change
-        self.assertEquals(
+        self.assertEqual(
             ["date"],
             self.preprocessor.run([
                 "date"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "start=2014-06-26"],
             self.preprocessor.run([
                 "date", "start=2014-06-26"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "end=2014-06-26"],
             self.preprocessor.run([
                 "date", "end=2014-06-26"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "start=2014-06-26", "end=2014-07-26"],
             self.preprocessor.run([
                 "date", "start=2014-06-26", "end=2014-07-26"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "start=2014-06-26", "end=2014-07-26", "lt"],
             self.preprocessor.run([
                 "date", "start=2014-06-26", "end=2014-07-26", "lt"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "start=2014-06-26", "lt", "foo"],
             self.preprocessor.run([
                 "date", "start=2014-06-26", "lt", "foo"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "start=2014-06-26", "end=2014-07-26", "gt", "foo"],
             self.preprocessor.run([
                 "date", "start=2014-06-26", "end=2014-07-26", "gt", "foo"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "end=2014-06-26", "gt"],
             self.preprocessor.run([
                 "date", "end=2014-06-26", "gt"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "start=2014-06-26", "in_range", "foo"],
             self.preprocessor.run([
                 "date", "start=2014-06-26", "in_range", "foo"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["date", "end=2014-07-26", "in_range"],
             self.preprocessor.run([
                 "date", "end=2014-07-26", "in_range"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["foo", "start=2014-06-26", "gt"],
             self.preprocessor.run([
                 "foo", "start=2014-06-26", "gt"
             ])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["foo", "end=2014-06-26", "lt"],
             self.preprocessor.run([
                 "foo", "end=2014-06-26", "lt"
@@ -1535,59 +1542,59 @@ class TokenPreprocessorTest(unittest.TestCase):
         )
 
     def testParenthesis(self):
-        self.assertEquals(
+        self.assertEqual(
             ["("],
             self.preprocessor.run(["("])
         )
-        self.assertEquals(
+        self.assertEqual(
             [")"],
             self.preprocessor.run([")"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["(", "(", ")", ")"],
             self.preprocessor.run(["(", "(", ")", ")"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["(", "(", ")", ")"],
             self.preprocessor.run(["(())"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["a", "(", "b", ")", "c"],
             self.preprocessor.run(["a", "(", "b", ")", "c"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["a", "(", "b", "c", ")", "d"],
             self.preprocessor.run(["a", "(", "b", "c", ")", "d"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["a", ")", "b", "(", "c"],
             self.preprocessor.run(["a", ")", "b", "(", "c"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["a", "(", "b", ")", "c"],
             self.preprocessor.run(["a", "(b)", "c"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["a", "(", "b", ")", "c"],
             self.preprocessor.run(["a(", "b", ")c"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["a", "(", "b", ")", "c"],
             self.preprocessor.run(["a(b)c"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["aA", "(", "bB", ")", "cC"],
             self.preprocessor.run(["aA(bB)cC"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["(", "aA", "(", "bB", ")", "cC", ")"],
             self.preprocessor.run(["(aA(bB)cC)"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["(", "aA", "(", "(", "bB", ")", "cC", ")"],
             self.preprocessor.run(["(aA(", "(bB)cC)"])
         )
-        self.assertEquals(
+        self.assertEqual(
             ["(", "aA", "(", "(", "(", "bB", ")", "cC", ")"],
             self.preprocessor.run(["(aA(", "(", "(bB)cC)"])
         )
@@ -1748,21 +1755,21 @@ class DomRuleAddTest(unittest.TestCase):
             "constraint location dummy1 rule #uname eq node1"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint location dummy1 rule id=MyRule score=100 role=master #uname eq node2"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint location dummy1 rule id=complexRule (#uname eq node3 and foo gt version 1.2) or (date-spec hours=12-23 weekdays=1-5 and date in_range 2014-07-26 to duration months=1)"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint location show --full")
         ac(output, """\
@@ -1785,7 +1792,7 @@ Location Constraints:
           Expression: date in_range 2014-07-26 to duration  (id:complexRule-rule-1-expr-1)
             Duration: months=1  (id:complexRule-rule-1-expr-1-duration)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint location show")
         ac(output, """\
@@ -1808,7 +1815,7 @@ Location Constraints:
           Expression: date in_range 2014-07-26 to duration
             Duration: months=1
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
     def test_invalid_score(self):
         output, returnVal = pcs(
@@ -1820,7 +1827,7 @@ Location Constraints:
             "Warning: invalid score 'pingd', setting score-attribute=pingd "
                 "instead\n"
         )
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint location show --full")
         ac(output, """\
@@ -1830,7 +1837,7 @@ Location Constraints:
       Rule: score-attribute=pingd  (id:location-dummy1-rule)
         Expression: defined pingd  (id:location-dummy1-rule-expr)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
     def test_invalid_rule(self):
         output, returnVal = pcs(
@@ -1838,7 +1845,7 @@ Location Constraints:
             "constraint location dummy1 rule score=100"
         )
         ac(output, "Error: no rule expression was specified\n")
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -1849,7 +1856,7 @@ Location Constraints:
             "Error: '#uname eq' is not a valid rule expression: unexpected end "
                 "of rule\n"
         )
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -1860,7 +1867,7 @@ Location Constraints:
             "Error: 'string #uname eq node1' is not a valid rule expression: "
                 "unexpected 'string' before 'eq'\n"
         )
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
     def test_ivalid_options(self):
         output, returnVal = pcs(
@@ -1868,14 +1875,14 @@ Location Constraints:
             "constraint location dummy1 rule role=foo #uname eq node1"
         )
         ac(output, "Error: invalid role 'foo', use 'master' or 'slave'\n")
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint location dummy1 rule score=100 score-attribute=pingd #uname eq node1"
         )
         ac(output, "Error: can not specify both score and score-attribute\n")
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -1886,18 +1893,18 @@ Location Constraints:
             "Error: invalid rule id '1foo', '1' is not a valid first character "
                 "for a rule id\n"
         )
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint location show --full")
         ac(output, "Location Constraints:\n")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
             "constraint location dummy1 rule id=MyRule #uname eq node1"
         )
         ac(output, "")
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(temp_cib, "constraint location show --full")
         ac(output, """\
@@ -1907,7 +1914,7 @@ Location Constraints:
       Rule: score=INFINITY  (id:MyRule)
         Expression: #uname eq node1  (id:MyRule-expr)
 """)
-        self.assertEquals(0, returnVal)
+        self.assertEqual(0, returnVal)
 
         output, returnVal = pcs(
             temp_cib,
@@ -1917,7 +1924,7 @@ Location Constraints:
             output,
             "Error: id 'MyRule' is already in use, please specify another one\n"
         )
-        self.assertEquals(1, returnVal)
+        self.assertEqual(1, returnVal)
 
     def assertExpressionXml(self, rule_expression, rule_xml):
         cib_dom = xml.dom.minidom.parse("empty.xml")
diff --git a/pcs/test/test_stonith.py b/pcs/test/test_stonith.py
index 99235b8..66310ff 100644
--- a/pcs/test/test_stonith.py
+++ b/pcs/test/test_stonith.py
@@ -1,10 +1,18 @@
-import os,sys
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
+import sys
 import shutil
 import unittest
 parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
-sys.path.insert(0,parentdir) 
+sys.path.insert(0, parentdir)
+
 import utils
-from pcs_test_functions import pcs,ac
+from pcs_test_functions import pcs, ac
+
 
 empty_cib = "empty.xml"
 temp_cib = "temp.xml"
@@ -43,8 +51,12 @@ class StonithTest(unittest.TestCase):
         ac(output,"")
 
         output, returnVal = pcs(temp_cib, "stonith show test9")
+        ac(output, """\
+ Resource: test9 (class=stonith type=fence_ilo)
+  Attributes: pcmk_status_action=xxx
+  Operations: monitor interval=60s (test9-monitor-interval-60s)
+""")
         assert returnVal == 0
-        ac(output, ' Resource: test9 (class=stonith type=fence_ilo)\n  Attributes: pcmk_status_action=xxx \n  Operations: monitor interval=60s (test9-monitor-interval-60s)\n')
 
         output, returnVal = pcs(temp_cib, "stonith delete test9")
         assert returnVal == 0
@@ -69,8 +81,12 @@ class StonithTest(unittest.TestCase):
         assert output == 'Error: unable to find resource \'apc-fencing\'\n',[output]
 
         output, returnVal = pcs(temp_cib, 'stonith show apc-fencing')
+        ac(output, """\
+ Resource: apc-fencing (class=stonith type=fence_apc)
+  Attributes: ipaddr="morph-apc" login="apc" passwd="apc" switch="1" pcmk_host_map="buzz-01:1;buzz-02:2;buzz-03:3;buzz-04:4;buzz-05:5" action="reboot" debug="1" pcmk_host_check="static-list" pcmk_host_list="buzz-01,buzz-02,buzz-03,buzz-04,buzz-05"
+  Operations: monitor interval=60s (apc-fencing-monitor-interval-60s)
+""")
         assert returnVal == 0
-        assert output == ' Resource: apc-fencing (class=stonith type=fence_apc)\n  Attributes: ipaddr="morph-apc" login="apc" passwd="apc" switch="1" pcmk_host_map="buzz-01:1;buzz-02:2;buzz-03:3;buzz-04:4;buzz-05:5" action="reboot" debug="1" pcmk_host_check="static-list" pcmk_host_list="buzz-01,buzz-02,buzz-03,buzz-04,buzz-05" \n  Operations: monitor interval=60s (apc-fencing-monitor-interval-60s)\n',[output]
 
         output, returnVal = pcs(temp_cib, 'stonith delete apc-fencing')
         assert returnVal == 0
@@ -89,16 +105,156 @@ class StonithTest(unittest.TestCase):
         assert output == " Resource: test2 (class=stonith type=fence_ilo)\n  Operations: monitor interval=60s (test2-monitor-interval-60s)\n",[output]
 
         output, returnVal = pcs(temp_cib, "stonith show --full")
+        ac(output, """\
+ Resource: test1 (class=stonith type=fence_noxist)
+  Operations: monitor interval=60s (test1-monitor-interval-60s)
+ Resource: test2 (class=stonith type=fence_ilo)
+  Operations: monitor interval=60s (test2-monitor-interval-60s)
+ Resource: test3 (class=stonith type=fence_ilo)
+  Attributes: ipaddr=test login=testA
+  Operations: monitor interval=60s (test3-monitor-interval-60s)
+""")
         assert returnVal == 0
-        assert output == " Resource: test1 (class=stonith type=fence_noxist)\n  Operations: monitor interval=60s (test1-monitor-interval-60s)\n Resource: test2 (class=stonith type=fence_ilo)\n  Operations: monitor interval=60s (test2-monitor-interval-60s)\n Resource: test3 (class=stonith type=fence_ilo)\n  Attributes: ipaddr=test login=testA \n  Operations: monitor interval=60s (test3-monitor-interval-60s)\n",[output]
 
         output, returnVal = pcs(temp_cib, 'stonith create test-fencing fence_apc pcmk_host_list="rhel7-node1 rhel7-node2" op monitor interval=61s --force')
         assert returnVal == 0
         ac(output,"")
 
         output, returnVal = pcs(temp_cib, 'config show')
+        ac(output, """\
+Cluster Name: test99
+Corosync Nodes:
+ rh7-1 rh7-2
+Pacemaker Nodes:
+
+Resources:
+
+Stonith Devices:
+ Resource: test1 (class=stonith type=fence_noxist)
+  Operations: monitor interval=60s (test1-monitor-interval-60s)
+ Resource: test2 (class=stonith type=fence_ilo)
+  Operations: monitor interval=60s (test2-monitor-interval-60s)
+ Resource: test3 (class=stonith type=fence_ilo)
+  Attributes: ipaddr=test login=testA
+  Operations: monitor interval=60s (test3-monitor-interval-60s)
+ Resource: test-fencing (class=stonith type=fence_apc)
+  Attributes: pcmk_host_list="rhel7-node1
+  Operations: monitor interval=61s (test-fencing-monitor-interval-61s)
+Fencing Levels:
+
+Location Constraints:
+Ordering Constraints:
+Colocation Constraints:
+
+Resources Defaults:
+ No defaults set
+Operations Defaults:
+ No defaults set
+
+Cluster Properties:
+""")
         assert returnVal == 0
-        assert output == 'Cluster Name: test99\nCorosync Nodes:\n rh7-1 rh7-2 \nPacemaker Nodes:\n \n\nResources: \n\nStonith Devices: \n Resource: test1 (class=stonith type=fence_noxist)\n  Operations: monitor interval=60s (test1-monitor-interval-60s)\n Resource: test2 (class=stonith type=fence_ilo)\n  Operations: monitor interval=60s (test2-monitor-interval-60s)\n Resource: test3 (class=stonith type=fence_ilo)\n  Attributes: ipaddr=test login=testA \n  Operations: monitor interval=60s  [...]
+
+    def test_stonith_create_provides_unfencing(self):
+        if utils.is_rhel6():
+            return
+
+        output, returnVal = pcs(
+            temp_cib,
+            "stonith create f1 fence_scsi"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "stonith create f2 fence_scsi meta provides=unfencing"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "stonith create f3 fence_scsi meta provides=something"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "stonith create f4 fence_xvm meta provides=something"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "stonith show --full")
+        ac(output, """\
+ Resource: f1 (class=stonith type=fence_scsi)
+  Meta Attrs: provides=unfencing 
+  Operations: monitor interval=60s (f1-monitor-interval-60s)
+ Resource: f2 (class=stonith type=fence_scsi)
+  Meta Attrs: provides=unfencing 
+  Operations: monitor interval=60s (f2-monitor-interval-60s)
+ Resource: f3 (class=stonith type=fence_scsi)
+  Meta Attrs: provides=unfencing 
+  Operations: monitor interval=60s (f3-monitor-interval-60s)
+ Resource: f4 (class=stonith type=fence_xvm)
+  Meta Attrs: provides=something 
+  Operations: monitor interval=60s (f4-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
+
+    def test_stonith_create_provides_unfencing_rhel6(self):
+        if not utils.is_rhel6():
+            return
+
+        output, returnVal = pcs(
+            temp_cib,
+            "stonith create f1 fence_mpath key=abc"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "stonith create f2 fence_mpath key=abc meta provides=unfencing"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "stonith create f3 fence_mpath key=abc meta provides=something"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(
+            temp_cib,
+            "stonith create f4 fence_xvm meta provides=something"
+        )
+        ac(output, "")
+        self.assertEqual(0, returnVal)
+
+        output, returnVal = pcs(temp_cib, "stonith show --full")
+        ac(output, """\
+ Resource: f1 (class=stonith type=fence_mpath)
+  Attributes: key=abc
+  Meta Attrs: provides=unfencing 
+  Operations: monitor interval=60s (f1-monitor-interval-60s)
+ Resource: f2 (class=stonith type=fence_mpath)
+  Attributes: key=abc
+  Meta Attrs: provides=unfencing 
+  Operations: monitor interval=60s (f2-monitor-interval-60s)
+ Resource: f3 (class=stonith type=fence_mpath)
+  Attributes: key=abc
+  Meta Attrs: provides=unfencing 
+  Operations: monitor interval=60s (f3-monitor-interval-60s)
+ Resource: f4 (class=stonith type=fence_xvm)
+  Meta Attrs: provides=something 
+  Operations: monitor interval=60s (f4-monitor-interval-60s)
+""")
+        self.assertEqual(0, returnVal)
 
     def testStonithFenceConfirm(self):
         output, returnVal = pcs(temp_cib, "stonith fence blah blah")
@@ -115,41 +271,49 @@ class StonithTest(unittest.TestCase):
         ac(output,"")
 
         output, returnVal = pcs(temp_cib, "stonith show F1")
+        ac(output, """\
+ Resource: F1 (class=stonith type=fence_apc)
+  Attributes: pcmk_host_list="nodea nodeb"
+  Operations: monitor interval=60s (F1-monitor-interval-60s)
+""")
         assert returnVal == 0
-        assert output == ' Resource: F1 (class=stonith type=fence_apc)\n  Attributes: pcmk_host_list="nodea nodeb" \n  Operations: monitor interval=60s (F1-monitor-interval-60s)\n',[output]
 
     def testPcmkHostAllowsMissingPort(self):
         # Test that port is not required when pcmk_host_argument or
         # pcmk_host_list or pcmk_host_map is specified
+        # Port is temporarily an optional parameter. Once we are getting
+        # metadata from pacemaker, this will be reviewed and fixed.
         output, returnVal = pcs(
             temp_cib,
             'stonith create apc-1 fence_apc params ipaddr="ip" login="apc" action="reboot"'
         )
-        ac(output, """\
-Error: missing required option(s): 'port' for resource type: stonith:fence_apc (use --force to override)
-""")
-        self.assertEquals(returnVal, 1)
+#        ac(output, """\
+#Error: missing required option(s): 'port' for resource type: stonith:fence_apc (use --force to override)
+#""")
+#        self.assertEquals(returnVal, 1)
+        ac(output, "")
+        self.assertEqual(returnVal, 0)
 
         output, returnVal = pcs(
             temp_cib,
             'stonith create apc-2 fence_apc params ipaddr="ip" login="apc" action="reboot" pcmk_host_map="buzz-01:1;buzz-02:2"'
         )
         ac(output, "")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
 
         output, returnVal = pcs(
             temp_cib,
             'stonith create apc-3 fence_apc params ipaddr="ip" login="apc" action="reboot" pcmk_host_list="buzz-01,buzz-02"'
         )
         ac(output, "")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
 
         output, returnVal = pcs(
             temp_cib,
             'stonith create apc-4 fence_apc params ipaddr="ip" login="apc" action="reboot" pcmk_host_argument="buzz-01"'
         )
         ac(output, "")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
 
     def testFenceLevels(self):
         output, returnVal = pcs(temp_cib, "stonith level remove 1 rh7-2 F1")
@@ -180,6 +344,26 @@ Error: missing required option(s): 'port' for resource type: stonith:fence_apc (
         assert returnVal == 0
         ac(output,"")
 
+        output, returnVal = pcs(temp_cib, "stonith level add NaN rh7-1 F3,F4")
+        ac(output, "Error: invalid level 'NaN', use a positive integer\n")
+        assert returnVal == 1
+
+        output, returnVal = pcs(temp_cib, "stonith level add -10 rh7-1 F3,F4")
+        ac(output, "Error: invalid level '-10', use a positive integer\n")
+        assert returnVal == 1
+
+        output, returnVal = pcs(temp_cib, "stonith level add 10abc rh7-1 F3,F4")
+        ac(output, "Error: invalid level '10abc', use a positive integer\n")
+        assert returnVal == 1
+
+        output, returnVal = pcs(temp_cib, "stonith level add 0 rh7-1 F3,F4")
+        ac(output, "Error: invalid level '0', use a positive integer\n")
+        assert returnVal == 1
+
+        output, returnVal = pcs(temp_cib, "stonith level add 000 rh7-1 F3,F4")
+        ac(output, "Error: invalid level '000', use a positive integer\n")
+        assert returnVal == 1
+
         output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-1 F3,F4")
         assert returnVal == 0
         assert output == ""
@@ -196,13 +380,25 @@ Error: missing required option(s): 'port' for resource type: stonith:fence_apc (
         assert returnVal == 0
         assert output == ""
 
-        output, returnVal = pcs(temp_cib, "stonith level add 2 rh7-2 F2")
+        output, returnVal = pcs(temp_cib, "stonith level add 002 rh7-2 F2")
         assert returnVal == 0
         assert output == ""
 
         output, returnVal = pcs(temp_cib, "stonith show")
         assert returnVal == 0
-        ac(output,' F1\t(stonith:fence_apc):\tStopped \n F2\t(stonith:fence_apc):\tStopped \n F3\t(stonith:fence_apc):\tStopped \n F4\t(stonith:fence_apc):\tStopped \n F5\t(stonith:fence_apc):\tStopped \n Node: rh7-1\n  Level 1 - F3,F4\n  Level 2 - F5,F2\n Node: rh7-2\n  Level 1 - F1\n  Level 2 - F2\n')
+        ac(output,"""\
+ F1\t(stonith:fence_apc):\tStopped
+ F2\t(stonith:fence_apc):\tStopped
+ F3\t(stonith:fence_apc):\tStopped
+ F4\t(stonith:fence_apc):\tStopped
+ F5\t(stonith:fence_apc):\tStopped
+ Node: rh7-1
+  Level 1 - F3,F4
+  Level 2 - F5,F2
+ Node: rh7-2
+  Level 1 - F1
+  Level 2 - F2
+""")
 
         output, returnVal = pcs(temp_cib, "stonith level")
         assert returnVal == 0
@@ -375,6 +571,26 @@ Error: missing required option(s): 'port' for resource type: stonith:fence_apc (
         assert r == 0
         ac(o,"")
 
+        o,r = pcs(temp_cib, "stonith level add 10 rh7-1 F1")
+        assert r == 0
+        ac(o,"")
+
+        o,r = pcs(temp_cib, "stonith level add 010 rh7-1 F2")
+        assert r == 0
+        ac(o,"")
+
+        o,r = pcs(temp_cib, "stonith level")
+        assert r == 0
+        ac(o, """\
+ Node: rh7-1
+  Level 10 - F1
+  Level 10 - F2
+""")
+
+        o,r = pcs(temp_cib, "stonith level clear")
+        assert r == 0
+        ac(o,"")
+
         o,r = pcs(temp_cib, "stonith level add 1 rh7-bad F1 --force")
         assert r == 0
         ac(o,"")
@@ -400,75 +616,75 @@ Error: missing required option(s): 'port' for resource type: stonith:fence_apc (
         output, returnVal = pcs(
             temp_cib, "stonith create n1-ipmi fence_ilo --force"
         )
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "")
 
         output, returnVal = pcs(
             temp_cib, "stonith create n2-ipmi fence_ilo --force"
         )
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "")
 
         output, returnVal = pcs(
             temp_cib, "stonith create n1-apc1 fence_apc --force"
         )
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "")
 
         output, returnVal = pcs(
             temp_cib, "stonith create n1-apc2 fence_apc --force"
         )
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "")
 
         output, returnVal = pcs(
             temp_cib, "stonith create n2-apc1 fence_apc --force"
         )
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "")
 
         output, returnVal = pcs(
             temp_cib, "stonith create n2-apc2 fence_apc --force"
         )
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "")
 
         output, returnVal = pcs(
             temp_cib, "stonith create n2-apc3 fence_apc --force"
         )
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "")
 
         output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-1 n1-ipmi")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "")
 
         output, returnVal = pcs(
             temp_cib, "stonith level add 2 rh7-1 n1-apc1,n1-apc2,n2-apc2"
         )
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "")
 
         output, returnVal = pcs(temp_cib, "stonith level add 1 rh7-2 n2-ipmi")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "")
 
         output, returnVal = pcs(
             temp_cib, "stonith level add 2 rh7-2 n2-apc1,n2-apc2,n2-apc3"
         )
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "")
 
         output, returnVal = pcs(temp_cib, "stonith")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, """\
- n1-ipmi\t(stonith:fence_ilo):\tStopped 
- n2-ipmi\t(stonith:fence_ilo):\tStopped 
- n1-apc1\t(stonith:fence_apc):\tStopped 
- n1-apc2\t(stonith:fence_apc):\tStopped 
- n2-apc1\t(stonith:fence_apc):\tStopped 
- n2-apc2\t(stonith:fence_apc):\tStopped 
- n2-apc3\t(stonith:fence_apc):\tStopped 
+ n1-ipmi\t(stonith:fence_ilo):\tStopped
+ n2-ipmi\t(stonith:fence_ilo):\tStopped
+ n1-apc1\t(stonith:fence_apc):\tStopped
+ n1-apc2\t(stonith:fence_apc):\tStopped
+ n2-apc1\t(stonith:fence_apc):\tStopped
+ n2-apc2\t(stonith:fence_apc):\tStopped
+ n2-apc3\t(stonith:fence_apc):\tStopped
  Node: rh7-1
   Level 1 - n1-ipmi
   Level 2 - n1-apc1,n1-apc2,n2-apc2
@@ -478,18 +694,18 @@ Error: missing required option(s): 'port' for resource type: stonith:fence_apc (
 """)
 
         output, returnVal = pcs(temp_cib, "stonith delete n2-apc2")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "Deleting Resource - n2-apc2\n")
 
         output, returnVal = pcs(temp_cib, "stonith")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, """\
- n1-ipmi\t(stonith:fence_ilo):\tStopped 
- n2-ipmi\t(stonith:fence_ilo):\tStopped 
- n1-apc1\t(stonith:fence_apc):\tStopped 
- n1-apc2\t(stonith:fence_apc):\tStopped 
- n2-apc1\t(stonith:fence_apc):\tStopped 
- n2-apc3\t(stonith:fence_apc):\tStopped 
+ n1-ipmi\t(stonith:fence_ilo):\tStopped
+ n2-ipmi\t(stonith:fence_ilo):\tStopped
+ n1-apc1\t(stonith:fence_apc):\tStopped
+ n1-apc2\t(stonith:fence_apc):\tStopped
+ n2-apc1\t(stonith:fence_apc):\tStopped
+ n2-apc3\t(stonith:fence_apc):\tStopped
  Node: rh7-1
   Level 1 - n1-ipmi
   Level 2 - n1-apc1,n1-apc2
@@ -499,17 +715,17 @@ Error: missing required option(s): 'port' for resource type: stonith:fence_apc (
 """)
 
         output, returnVal = pcs(temp_cib, "stonith delete n2-apc1")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "Deleting Resource - n2-apc1\n")
 
         output, returnVal = pcs(temp_cib, "stonith")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, """\
- n1-ipmi\t(stonith:fence_ilo):\tStopped 
- n2-ipmi\t(stonith:fence_ilo):\tStopped 
- n1-apc1\t(stonith:fence_apc):\tStopped 
- n1-apc2\t(stonith:fence_apc):\tStopped 
- n2-apc3\t(stonith:fence_apc):\tStopped 
+ n1-ipmi\t(stonith:fence_ilo):\tStopped
+ n2-ipmi\t(stonith:fence_ilo):\tStopped
+ n1-apc1\t(stonith:fence_apc):\tStopped
+ n1-apc2\t(stonith:fence_apc):\tStopped
+ n2-apc3\t(stonith:fence_apc):\tStopped
  Node: rh7-1
   Level 1 - n1-ipmi
   Level 2 - n1-apc1,n1-apc2
@@ -519,16 +735,16 @@ Error: missing required option(s): 'port' for resource type: stonith:fence_apc (
 """)
 
         output, returnVal = pcs(temp_cib, "stonith delete n2-apc3")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, "Deleting Resource - n2-apc3\n")
 
         output, returnVal = pcs(temp_cib, "stonith")
-        self.assertEquals(returnVal, 0)
+        self.assertEqual(returnVal, 0)
         ac(output, """\
- n1-ipmi\t(stonith:fence_ilo):\tStopped 
- n2-ipmi\t(stonith:fence_ilo):\tStopped 
- n1-apc1\t(stonith:fence_apc):\tStopped 
- n1-apc2\t(stonith:fence_apc):\tStopped 
+ n1-ipmi\t(stonith:fence_ilo):\tStopped
+ n2-ipmi\t(stonith:fence_ilo):\tStopped
+ n1-apc1\t(stonith:fence_apc):\tStopped
+ n1-apc2\t(stonith:fence_apc):\tStopped
  Node: rh7-1
   Level 1 - n1-ipmi
   Level 2 - n1-apc1,n1-apc2
@@ -536,30 +752,59 @@ Error: missing required option(s): 'port' for resource type: stonith:fence_apc (
   Level 1 - n2-ipmi
 """)
 
+        output, returnVal = pcs(temp_cib, "resource delete n1-apc1")
+        self.assertEqual(returnVal, 0)
+        ac(output, "Deleting Resource - n1-apc1\n")
+
+        output, returnVal = pcs(temp_cib, "stonith")
+        self.assertEqual(returnVal, 0)
+        ac(output, """\
+ n1-ipmi\t(stonith:fence_ilo):\tStopped
+ n2-ipmi\t(stonith:fence_ilo):\tStopped
+ n1-apc2\t(stonith:fence_apc):\tStopped
+ Node: rh7-1
+  Level 1 - n1-ipmi
+  Level 2 - n1-apc2
+ Node: rh7-2
+  Level 1 - n2-ipmi
+""")
+
+        output, returnVal = pcs(temp_cib, "resource delete n1-apc2")
+        self.assertEqual(returnVal, 0)
+        ac(output, "Deleting Resource - n1-apc2\n")
+
+        output, returnVal = pcs(temp_cib, "stonith")
+        self.assertEqual(returnVal, 0)
+        ac(output, """\
+ n1-ipmi\t(stonith:fence_ilo):\tStopped
+ n2-ipmi\t(stonith:fence_ilo):\tStopped
+ Node: rh7-1
+  Level 1 - n1-ipmi
+ Node: rh7-2
+  Level 1 - n2-ipmi
+""")
+
     def testNoStonithWarning(self):
-        o,r = pcs("status")
+        o,r = pcs(temp_cib, "status")
         assert "WARNING: no stonith devices and " in o
-        assert r == 0
 
-        o,r = pcs("stonith create test_stonith fence_apc ipaddr=ip login=lgn, action=reboot, pcmk_host_argument=node1")
+        o,r = pcs(temp_cib, "stonith create test_stonith fence_apc ipaddr=ip login=lgn, action=reboot, pcmk_host_argument=node1")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("status")
+        o,r = pcs(temp_cib, "status")
         assert "WARNING: no stonith devices and " not in o
-        assert r == 0
 
-        o,r = pcs("stonith delete test_stonith")
+        o,r = pcs(temp_cib, "stonith delete test_stonith")
         ac(o,"Deleting Resource - test_stonith\n")
         assert r == 0
 
-        o,r = pcs("stonith create test_stonith fence_apc ipaddr=ip login=lgn, action=reboot, pcmk_host_argument=node1 --clone")
+        o,r = pcs(temp_cib, "stonith create test_stonith fence_apc ipaddr=ip login=lgn, action=reboot, pcmk_host_argument=node1 --clone")
         ac(o,"")
         assert r == 0
 
-        o,r = pcs("status")
+        o,r = pcs(temp_cib, "status")
         assert "WARNING: no stonith devices and " not in o
-        assert r == 0
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/pcs/test/test_utils.py b/pcs/test/test_utils.py
index b1698a6..86871ff 100644
--- a/pcs/test/test_utils.py
+++ b/pcs/test/test_utils.py
@@ -1,20 +1,32 @@
-import os
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os.path
 import sys
 import shutil
 import unittest
 import xml.dom.minidom
-parentdir = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
+currentdir = os.path.dirname(os.path.abspath(__file__))
+parentdir = os.path.dirname(currentdir)
 sys.path.insert(0, parentdir)
-from pcs_test_functions import pcs, ac
+
 import utils
+from pcs_test_functions import pcs, ac
 
-empty_cib = "empty.xml"
-temp_cib = "temp.xml"
+
+cib_with_nodes =  os.path.join(currentdir, "empty-withnodes.xml")
+empty_cib = os.path.join(currentdir, "empty.xml")
+temp_cib = os.path.join(currentdir, "temp.xml")
 
 class UtilsTest(unittest.TestCase):
 
     def get_cib_empty(self):
-        return xml.dom.minidom.parse("empty.xml")
+        return xml.dom.minidom.parse(empty_cib)
+
+    def get_cib_with_nodes_minidom(self):
+        return xml.dom.minidom.parse(cib_with_nodes)
 
     def get_cib_resources(self):
         cib_dom = self.get_cib_empty()
@@ -28,6 +40,14 @@ class UtilsTest(unittest.TestCase):
                           class="ocf" provider="heartbeat" type="Dummy">
                       </primitive>
                   </clone>
+                  <clone id="myUniqueClone">
+                      <primitive id="myUniqueClonedResource"
+                          class="ocf" provider="heartbeat" type="Dummy">
+                      </primitive>
+                      <meta-attributes>
+                        <nvpair name="globally-unique" value="true" />
+                      </meta-attributes>
+                  </clone>
                   <master id="myMaster">
                       <primitive id="myMasteredResource"
                             class="ocf" provider="heartbeat" type="Dummy">
@@ -93,6 +113,7 @@ class UtilsTest(unittest.TestCase):
         all_ids = set([
             "none", "myResource",
             "myClone", "myClonedResource",
+            "myUniqueClone", "myUniqueClonedResource",
             "myMaster", "myMasteredResource",
             "myGroup", "myGroupedResource",
             "myGroupClone", "myClonedGroup", "myClonedGroupedResource",
@@ -101,7 +122,8 @@ class UtilsTest(unittest.TestCase):
 
         resource_ids = set([
             "myResource",
-            "myClonedResource", "myGroupedResource", "myMasteredResource",
+            "myClonedResource", "myUniqueClonedResource",
+            "myGroupedResource", "myMasteredResource",
             "myClonedGroupedResource", "myMasteredGroupedResource"
         ])
         test_dom_get(
@@ -109,7 +131,10 @@ class UtilsTest(unittest.TestCase):
             resource_ids, all_ids - resource_ids
         )
 
-        cloned_ids = set(["myClonedResource", "myClonedGroupedResource"])
+        cloned_ids = set([
+            "myClonedResource", "myUniqueClonedResource",
+            "myClonedGroupedResource"
+        ])
         test_dom_get(
             utils.dom_get_resource_clone, cib_dom,
             cloned_ids, all_ids - cloned_ids
@@ -132,7 +157,7 @@ class UtilsTest(unittest.TestCase):
             cloned_group_ids, all_ids - cloned_group_ids
         )
 
-        clone_ids = set(["myClone", "myGroupClone"])
+        clone_ids = set(["myClone", "myUniqueClone", "myGroupClone"])
         test_dom_get(
             utils.dom_get_clone, cib_dom,
             clone_ids, all_ids - clone_ids
@@ -200,21 +225,21 @@ class UtilsTest(unittest.TestCase):
             ),
             "myGroupMaster"
         )
-        self.assertEquals(
+        self.assertEqual(
             None,
             utils.dom_get_resource_clone_ms_parent(cib_dom, "myResource")
         )
-        self.assertEquals(
+        self.assertEqual(
             None,
             utils.dom_get_resource_clone_ms_parent(cib_dom, "myGroup")
         )
-        self.assertEquals(
+        self.assertEqual(
             None,
             utils.dom_get_resource_clone_ms_parent(cib_dom, "myGroupedResource")
         )
 
     def testDomGetResourceRemoteNodeName(self):
-        dom = xml.dom.minidom.parse("empty.xml")
+        dom = self.get_cib_empty()
         new_resources = xml.dom.minidom.parseString("""
             <resources>
                 <primitive id="dummy1"
@@ -245,19 +270,19 @@ class UtilsTest(unittest.TestCase):
         resources = dom.getElementsByTagName("resources")[0]
         resources.parentNode.replaceChild(new_resources, resources)
 
-        self.assertEquals(
+        self.assertEqual(
             None,
             utils.dom_get_resource_remote_node_name(
                 utils.dom_get_resource(dom, "dummy1")
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             None,
             utils.dom_get_resource_remote_node_name(
                 utils.dom_get_resource(dom, "dummy2")
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             "guest1",
             utils.dom_get_resource_remote_node_name(
                 utils.dom_get_resource(dom, "vm-guest1")
@@ -265,7 +290,7 @@ class UtilsTest(unittest.TestCase):
         )
 
     def test_dom_get_meta_attr_value(self):
-        dom = xml.dom.minidom.parse("empty.xml")
+        dom = self.get_cib_empty()
         new_resources = xml.dom.minidom.parseString("""
             <resources>
                 <primitive id="dummy1"
@@ -296,25 +321,25 @@ class UtilsTest(unittest.TestCase):
         resources = dom.getElementsByTagName("resources")[0]
         resources.parentNode.replaceChild(new_resources, resources)
 
-        self.assertEquals(
+        self.assertEqual(
             None,
             utils.dom_get_meta_attr_value(
                 utils.dom_get_resource(dom, "dummy1"), "foo"
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             None,
             utils.dom_get_meta_attr_value(
                 utils.dom_get_resource(dom, "dummy2"), "remote-node"
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             "guest1",
             utils.dom_get_meta_attr_value(
                 utils.dom_get_resource(dom, "vm-guest1"), "remote-node"
             )
         )
-        self.assertEquals(
+        self.assertEqual(
             None,
             utils.dom_get_meta_attr_value(
                 utils.dom_get_resource(dom, "vm-guest1"), "foo"
@@ -352,10 +377,10 @@ class UtilsTest(unittest.TestCase):
             ),
             "cc1"
         )
-        self.assertEquals(None, utils.dom_get_element_with_id(dom, "dd", "bb1"))
-        self.assertEquals(None, utils.dom_get_element_with_id(dom, "bb", "bb4"))
-        self.assertEquals(None, utils.dom_get_element_with_id(dom, "bb", "cc1"))
-        self.assertEquals(
+        self.assertEqual(None, utils.dom_get_element_with_id(dom, "dd", "bb1"))
+        self.assertEqual(None, utils.dom_get_element_with_id(dom, "bb", "bb4"))
+        self.assertEqual(None, utils.dom_get_element_with_id(dom, "bb", "cc1"))
+        self.assertEqual(
             None,
             utils.dom_get_element_with_id(
                 utils.dom_get_element_with_id(dom, "bb", "bb2"),
@@ -393,42 +418,42 @@ class UtilsTest(unittest.TestCase):
             "bb2"
         )
 
-        self.assertEquals(None, utils.dom_get_parent_by_tag_name(bb1, "cc"))
-        self.assertEquals(None, utils.dom_get_parent_by_tag_name(cc1, "dd"))
-        self.assertEquals(None, utils.dom_get_parent_by_tag_name(cc1, "ee"))
+        self.assertEqual(None, utils.dom_get_parent_by_tag_name(bb1, "cc"))
+        self.assertEqual(None, utils.dom_get_parent_by_tag_name(cc1, "dd"))
+        self.assertEqual(None, utils.dom_get_parent_by_tag_name(cc1, "ee"))
 
     def testValidateConstraintResource(self):
         dom = self.get_cib_resources()
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myClone"),
             utils.validate_constraint_resource(dom, "myClone")
         )
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myGroupClone"),
             utils.validate_constraint_resource(dom, "myGroupClone")
         )
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myMaster"),
             utils.validate_constraint_resource(dom, "myMaster")
         )
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myGroupMaster"),
             utils.validate_constraint_resource(dom, "myGroupMaster")
         )
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myResource"),
             utils.validate_constraint_resource(dom, "myResource")
         )
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myGroup"),
             utils.validate_constraint_resource(dom, "myGroup")
         )
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myGroupedResource"),
             utils.validate_constraint_resource(dom, "myGroupedResource")
         )
 
-        self.assertEquals(
+        self.assertEqual(
             (False, "Resource 'myNonexistent' does not exist", None),
             utils.validate_constraint_resource(dom, "myNonexistent")
         )
@@ -437,7 +462,7 @@ class UtilsTest(unittest.TestCase):
             "%s is a clone resource, you should use the clone id: "
             "%s when adding constraints. Use --force to override."
         )
-        self.assertEquals(
+        self.assertEqual(
             (
                 False,
                 message % ("myClonedResource", "myClone"),
@@ -445,7 +470,7 @@ class UtilsTest(unittest.TestCase):
             ),
             utils.validate_constraint_resource(dom, "myClonedResource")
         )
-        self.assertEquals(
+        self.assertEqual(
             (
                 False,
                 message % ("myClonedGroup", "myGroupClone"),
@@ -453,7 +478,7 @@ class UtilsTest(unittest.TestCase):
             ),
             utils.validate_constraint_resource(dom, "myClonedGroup")
         )
-        self.assertEquals(
+        self.assertEqual(
             (
                 False,
                 message % ("myClonedGroupedResource", "myGroupClone"),
@@ -466,7 +491,7 @@ class UtilsTest(unittest.TestCase):
             "%s is a master/slave resource, you should use the master id: "
             "%s when adding constraints. Use --force to override."
         )
-        self.assertEquals(
+        self.assertEqual(
             (
                 False,
                 message % ("myMasteredResource", "myMaster"),
@@ -474,7 +499,7 @@ class UtilsTest(unittest.TestCase):
             ),
             utils.validate_constraint_resource(dom, "myMasteredResource")
         )
-        self.assertEquals(
+        self.assertEqual(
             (
                 False,
                 message % ("myMasteredGroup", "myGroupMaster"),
@@ -482,7 +507,7 @@ class UtilsTest(unittest.TestCase):
             ),
             utils.validate_constraint_resource(dom, "myMasteredGroup")
         )
-        self.assertEquals(
+        self.assertEqual(
             (
                 False,
                 message % ("myMasteredGroupedResource", "myGroupMaster"),
@@ -492,98 +517,98 @@ class UtilsTest(unittest.TestCase):
         )
 
         utils.pcs_options["--force"] = True
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myClone"),
             utils.validate_constraint_resource(dom, "myClonedResource")
         )
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myGroupClone"),
             utils.validate_constraint_resource(dom, "myClonedGroup")
         )
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myGroupClone"),
             utils.validate_constraint_resource(dom, "myClonedGroupedResource")
         )
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myMaster"),
             utils.validate_constraint_resource(dom, "myMasteredResource")
         )
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myGroupMaster"),
             utils.validate_constraint_resource(dom, "myMasteredGroup")
         )
-        self.assertEquals(
+        self.assertEqual(
             (True, "", "myGroupMaster"),
             utils.validate_constraint_resource(dom, "myMasteredGroupedResource")
         )
 
     def testValidateXmlId(self):
-        self.assertEquals((True, ""), utils.validate_xml_id("dummy"))
-        self.assertEquals((True, ""), utils.validate_xml_id("DUMMY"))
-        self.assertEquals((True, ""), utils.validate_xml_id("dUmMy"))
-        self.assertEquals((True, ""), utils.validate_xml_id("dummy0"))
-        self.assertEquals((True, ""), utils.validate_xml_id("dum0my"))
-        self.assertEquals((True, ""), utils.validate_xml_id("dummy-"))
-        self.assertEquals((True, ""), utils.validate_xml_id("dum-my"))
-        self.assertEquals((True, ""), utils.validate_xml_id("dummy."))
-        self.assertEquals((True, ""), utils.validate_xml_id("dum.my"))
-        self.assertEquals((True, ""), utils.validate_xml_id("_dummy"))
-        self.assertEquals((True, ""), utils.validate_xml_id("dummy_"))
-        self.assertEquals((True, ""), utils.validate_xml_id("dum_my"))
-
-        self.assertEquals(
+        self.assertEqual((True, ""), utils.validate_xml_id("dummy"))
+        self.assertEqual((True, ""), utils.validate_xml_id("DUMMY"))
+        self.assertEqual((True, ""), utils.validate_xml_id("dUmMy"))
+        self.assertEqual((True, ""), utils.validate_xml_id("dummy0"))
+        self.assertEqual((True, ""), utils.validate_xml_id("dum0my"))
+        self.assertEqual((True, ""), utils.validate_xml_id("dummy-"))
+        self.assertEqual((True, ""), utils.validate_xml_id("dum-my"))
+        self.assertEqual((True, ""), utils.validate_xml_id("dummy."))
+        self.assertEqual((True, ""), utils.validate_xml_id("dum.my"))
+        self.assertEqual((True, ""), utils.validate_xml_id("_dummy"))
+        self.assertEqual((True, ""), utils.validate_xml_id("dummy_"))
+        self.assertEqual((True, ""), utils.validate_xml_id("dum_my"))
+
+        self.assertEqual(
             (False, "test id cannot be empty"),
             utils.validate_xml_id("", "test id")
         )
 
         msg = "invalid test id '%s', '%s' is not a valid first character for a test id"
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % ("0", "0")),
             utils.validate_xml_id("0", "test id")
         )
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % ("-", "-")),
             utils.validate_xml_id("-", "test id")
         )
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % (".", ".")),
             utils.validate_xml_id(".", "test id")
         )
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % (":", ":")),
             utils.validate_xml_id(":", "test id")
         )
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % ("0dummy", "0")),
             utils.validate_xml_id("0dummy", "test id")
         )
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % ("-dummy", "-")),
             utils.validate_xml_id("-dummy", "test id")
         )
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % (".dummy", ".")),
             utils.validate_xml_id(".dummy", "test id")
         )
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % (":dummy", ":")),
             utils.validate_xml_id(":dummy", "test id")
         )
 
         msg = "invalid test id '%s', '%s' is not a valid character for a test id"
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % ("dum:my", ":")),
             utils.validate_xml_id("dum:my", "test id")
         )
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % ("dummy:", ":")),
             utils.validate_xml_id("dummy:", "test id")
         )
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % ("dum?my", "?")),
             utils.validate_xml_id("dum?my", "test id")
         )
-        self.assertEquals(
+        self.assertEqual(
             (False, msg % ("dummy?", "?")),
             utils.validate_xml_id("dummy?", "test id")
         )
@@ -635,69 +660,25 @@ class UtilsTest(unittest.TestCase):
         self.assertFalse(utils.is_score("+10+INFINITY"))
 
     def test_get_timeout_seconds(self):
-        self.assertEquals(utils.get_timeout_seconds("10"), 10)
-        self.assertEquals(utils.get_timeout_seconds("10s"), 10)
-        self.assertEquals(utils.get_timeout_seconds("10min"), 600)
-
-        self.assertEquals(utils.get_timeout_seconds("1a1s"), None)
-        self.assertEquals(utils.get_timeout_seconds("10m"), None)
-        self.assertEquals(utils.get_timeout_seconds("10mim"), None)
-        self.assertEquals(utils.get_timeout_seconds("aaa"), None)
-        self.assertEquals(utils.get_timeout_seconds(""), None)
-
-        self.assertEquals(utils.get_timeout_seconds("1a1s", True), "1a1s")
-        self.assertEquals(utils.get_timeout_seconds("10m", True), "10m")
-        self.assertEquals(utils.get_timeout_seconds("10mim", True), "10mim")
-        self.assertEquals(utils.get_timeout_seconds("aaa", True), "aaa")
-        self.assertEquals(utils.get_timeout_seconds("", True), "")
-
-    def test_get_default_op_timeout(self):
-        shutil.copy(empty_cib, temp_cib)
-        utils.usefile = True
-        utils.filename = temp_cib
-
-        self.assertEquals(utils.get_default_op_timeout(), 20)
-        output, retVal = pcs(temp_cib, "property set default-action-timeout=25")
-        self.assertEquals(retVal, 0)
-        self.assertEquals(utils.get_default_op_timeout(), 25)
-        output, retVal = pcs(temp_cib, "property unset default-action-timeout")
-        self.assertEquals(retVal, 0)
-        self.assertEquals(utils.get_default_op_timeout(), 20)
-
-        utils.usefile = False
-        utils.filename = ""
-
-    def test_get_resource_op_timeout(self):
-        shutil.copy(empty_cib, temp_cib)
-        utils.usefile = True
-        utils.filename = temp_cib
-
-        output, retVal = pcs(temp_cib, "property set default-action-timeout=25")
-        ac(output, "")
-        self.assertEquals(retVal, 0)
-        output, retVal = pcs(
-            temp_cib,
-            "resource create dummy Dummy op start timeout=33s --no-default-ops"
-        )
-        ac(output, "")
-        self.assertEquals(retVal, 0)
-        dom = xml.dom.minidom.parse(temp_cib)
-
-        self.assertEquals(
-            utils.get_resource_op_timeout(dom, "dummy", "start"),
-            33
-        )
-        self.assertEquals(
-            utils.get_resource_op_timeout(dom, "dummy", "stop"),
-            20
-        )
-        self.assertEquals(
-            utils.get_resource_op_timeout(dom, "dummy0", "start"),
-            25
-        )
-
-        utils.usefile = False
-        utils.filename = ""
+        self.assertEqual(utils.get_timeout_seconds("10"), 10)
+        self.assertEqual(utils.get_timeout_seconds("10s"), 10)
+        self.assertEqual(utils.get_timeout_seconds("10sec"), 10)
+        self.assertEqual(utils.get_timeout_seconds("10m"), 600)
+        self.assertEqual(utils.get_timeout_seconds("10min"), 600)
+        self.assertEqual(utils.get_timeout_seconds("10h"), 36000)
+        self.assertEqual(utils.get_timeout_seconds("10hr"), 36000)
+
+        self.assertEqual(utils.get_timeout_seconds("1a1s"), None)
+        self.assertEqual(utils.get_timeout_seconds("10mm"), None)
+        self.assertEqual(utils.get_timeout_seconds("10mim"), None)
+        self.assertEqual(utils.get_timeout_seconds("aaa"), None)
+        self.assertEqual(utils.get_timeout_seconds(""), None)
+
+        self.assertEqual(utils.get_timeout_seconds("1a1s", True), "1a1s")
+        self.assertEqual(utils.get_timeout_seconds("10mm", True), "10mm")
+        self.assertEqual(utils.get_timeout_seconds("10mim", True), "10mim")
+        self.assertEqual(utils.get_timeout_seconds("aaa", True), "aaa")
+        self.assertEqual(utils.get_timeout_seconds("", True), "")
 
     def get_cib_status_lrm(self):
         cib_dom = self.get_cib_empty()
@@ -737,80 +718,6 @@ class UtilsTest(unittest.TestCase):
         status.parentNode.replaceChild(new_status, status)
         return cib_dom
 
-    def test_get_lrm_rsc_op(self):
-        dom = self.get_cib_status_lrm()
-
-        op_list = utils.get_lrm_rsc_op(dom, "dummy")
-        op_id_list = [op.getAttribute("id") for op in op_list]
-        self.assertEquals(
-            op_id_list,
-            ["dummy_monitor_0", "dummy_stop_0", "dummy_start_0",
-                "dummy_monitor_30000",]
-        )
-        op_list = utils.get_lrm_rsc_op(dom, "dummy", ["monitor"])
-        op_id_list = [op.getAttribute("id") for op in op_list]
-        self.assertEquals(
-            op_id_list,
-            ["dummy_monitor_0", "dummy_monitor_30000",]
-        )
-        op_list = utils.get_lrm_rsc_op(dom, "dummy", ["stop", "start"])
-        op_id_list = [op.getAttribute("id") for op in op_list]
-        self.assertEquals(
-            op_id_list,
-            ["dummy_stop_0", "dummy_start_0",]
-        )
-        op_list = utils.get_lrm_rsc_op(dom, "dummy", last_call_id=30)
-        op_id_list = [op.getAttribute("id") for op in op_list]
-        self.assertEquals(
-            op_id_list,
-            ["dummy_stop_0", "dummy_start_0", "dummy_monitor_30000",]
-        )
-        op_list = utils.get_lrm_rsc_op(dom, "dummy", ["monitor"], 30)
-        op_id_list = [op.getAttribute("id") for op in op_list]
-        self.assertEquals(
-            op_id_list,
-            ["dummy_monitor_30000",]
-        )
-
-        op_list = utils.get_lrm_rsc_op(dom, "dummy", last_call_id=340)
-        op_id_list = [op.getAttribute("id") for op in op_list]
-        self.assertEquals(op_id_list, [])
-        op_list = utils.get_lrm_rsc_op(dom, "dummy", last_call_id=34)
-        op_id_list = [op.getAttribute("id") for op in op_list]
-        self.assertEquals(op_id_list, [])
-        op_list = utils.get_lrm_rsc_op(dom, "dummy0", ["monitor"], 30)
-        op_id_list = [op.getAttribute("id") for op in op_list]
-        self.assertEquals(op_id_list, [])
-        op_list = utils.get_lrm_rsc_op(dom, "dummy0", ["monitor"])
-        op_id_list = [op.getAttribute("id") for op in op_list]
-        self.assertEquals(op_id_list, [])
-        op_list = utils.get_lrm_rsc_op(dom, "dummy0", last_call_id=30)
-        op_id_list = [op.getAttribute("id") for op in op_list]
-        self.assertEquals(op_id_list, [])
-        op_list = utils.get_lrm_rsc_op(dom, "dummy0")
-        op_id_list = [op.getAttribute("id") for op in op_list]
-        self.assertEquals(op_id_list, [])
-
-    def test_get_lrm_rsc_op_failures(self):
-        dom = self.get_cib_status_lrm()
-
-        failures = utils.get_lrm_rsc_op_failures(
-            utils.get_lrm_rsc_op(dom, "dummy")
-        )
-        self.assertEquals(
-            failures,
-            ["rh70-node2: failed", "Xrh70-node1X: test"]
-        )
-
-        failures = utils.get_lrm_rsc_op_failures(
-            utils.get_lrm_rsc_op(dom, "dummy", ["start"])
-        )
-        self.assertEquals(failures, [])
-        failures = utils.get_lrm_rsc_op_failures(
-            utils.get_lrm_rsc_op(dom, "dummy0")
-        )
-        self.assertEquals(failures, [])
-
     def test_resource_running_on(self):
         status = xml.dom.minidom.parseString("""
 <crm_mon>
@@ -892,357 +799,187 @@ class UtilsTest(unittest.TestCase):
 </crm_mon>
         """).documentElement
 
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myResource", status),
             {
                 'message':
                     "Resource 'myResource' is running on node rh70-node1.",
+                'is_running': True,
                 'nodes_master': [],
                 'nodes_slave': [],
                 'nodes_started': ["rh70-node1"],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myClonedResource", status),
             {
                 'message':
                     "Resource 'myClonedResource' is running on nodes "
                         "rh70-node1, rh70-node2, rh70-node3.",
+                'is_running': True,
                 'nodes_master': [],
                 'nodes_slave': [],
                 'nodes_started': ["rh70-node1", "rh70-node2", "rh70-node3"],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myClone", status),
             {
                 'message':
                     "Resource 'myClone' is running on nodes "
                         "rh70-node1, rh70-node2, rh70-node3.",
+                'is_running': True,
                 'nodes_master': [],
                 'nodes_slave': [],
                 'nodes_started': ["rh70-node1", "rh70-node2", "rh70-node3"],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myMasteredResource", status),
             {
                 'message':
                     "Resource 'myMasteredResource' is master on node "
                         "rh70-node1; slave on nodes rh70-node2, rh70-node3.",
+                'is_running': True,
                 'nodes_master': ["rh70-node1"],
                 'nodes_slave': ["rh70-node2", "rh70-node3"],
                 'nodes_started': [],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myMaster", status),
             {
                 'message':
                     "Resource 'myMaster' is master on node "
                         "rh70-node1; slave on nodes rh70-node2, rh70-node3.",
+                'is_running': True,
                 'nodes_master': ["rh70-node1"],
                 'nodes_slave': ["rh70-node2", "rh70-node3"],
                 'nodes_started': [],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myGroupedResource", status),
             {
                 'message':
                     "Resource 'myGroupedResource' is running on node "
                         "rh70-node2.",
+                'is_running': True,
                 'nodes_master': [],
                 'nodes_slave': [],
                 'nodes_started': ["rh70-node2"],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myGroup", status),
             {
                 'message':
                     "Resource 'myGroup' is running on node "
                         "rh70-node2.",
+                'is_running': True,
                 'nodes_master': [],
                 'nodes_slave': [],
                 'nodes_started': ["rh70-node2"],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myClonedGroupedResource", status),
             {
                 'message':
                     "Resource 'myClonedGroupedResource' is running on nodes "
                         "rh70-node1, rh70-node2, rh70-node3, rh70-node3.",
+                'is_running': True,
                 'nodes_master': [],
                 'nodes_slave': [],
                 'nodes_started': ["rh70-node1", "rh70-node2", "rh70-node3",
                     "rh70-node3"],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myClonedGroup", status),
             {
                 'message':
                     "Resource 'myClonedGroup' is running on nodes "
                         "rh70-node1, rh70-node2, rh70-node3, rh70-node3.",
+                'is_running': True,
                 'nodes_master': [],
                 'nodes_slave': [],
                 'nodes_started': ["rh70-node1", "rh70-node2", "rh70-node3",
                     "rh70-node3"],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myGroupClone", status),
             {
                 'message':
                     "Resource 'myGroupClone' is running on nodes "
                         "rh70-node1, rh70-node2, rh70-node3, rh70-node3.",
+                'is_running': True,
                 'nodes_master': [],
                 'nodes_slave': [],
                 'nodes_started': ["rh70-node1", "rh70-node2", "rh70-node3",
                     "rh70-node3"],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myMasteredGroupedResource", status),
             {
                 'message':
                     "Resource 'myMasteredGroupedResource' is master on node "
                         "rh70-node2; slave on nodes rh70-node1, rh70-node3.",
+                'is_running': True,
                 'nodes_master': ["rh70-node2"],
                 'nodes_slave': ["rh70-node1", "rh70-node3"],
                 'nodes_started': [],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myMasteredGroup", status),
             {
                 'message':
                     "Resource 'myMasteredGroup' is master on node "
                         "rh70-node2; slave on nodes rh70-node1, rh70-node3.",
+                'is_running': True,
                 'nodes_master': ["rh70-node2"],
                 'nodes_slave': ["rh70-node1", "rh70-node3"],
                 'nodes_started': [],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myGroupMaster", status),
             {
                 'message':
                     "Resource 'myGroupMaster' is master on node "
                         "rh70-node2; slave on nodes rh70-node1, rh70-node3.",
+                'is_running': True,
                 'nodes_master': ["rh70-node2"],
                 'nodes_slave': ["rh70-node1", "rh70-node3"],
                 'nodes_started': [],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("notMyResource", status),
             {
                 'message':
                     "Resource 'notMyResource' is not running on any node",
+                'is_running': False,
                 'nodes_master': [],
                 'nodes_slave': [],
                 'nodes_started': [],
             }
         )
-        self.assertEquals(
+        self.assertEqual(
             utils.resource_running_on("myStoppedResource", status),
             {
                 'message':
                     "Resource 'myStoppedResource' is not running on any node",
+                'is_running': False,
                 'nodes_master': [],
                 'nodes_slave': [],
                 'nodes_started': [],
             }
         )
 
-    def test_count_expected_resource_instances(self):
-        dom = xml.dom.minidom.parse("empty.xml")
-        new_resources = xml.dom.minidom.parseString("""
-<resources>
-    <primitive id="prim1">
-    </primitive>
-    <group id="group1">
-        <primitive id="prim2">
-        </primitive>
-    </group>
-    <clone id="clone1">
-        <primitive id="prim3">
-        </primitive>
-    </clone>
-    <clone id="clone2">
-        <primitive id="prim4">
-        </primitive>
-        <meta_attributes>
-            <nvpair name="clone-max" value="9"/>
-            <nvpair name="clone-node-max" value="3"/>
-        </meta_attributes>
-    </clone>
-    <clone id="clone3">
-        <primitive id="prim5">
-        </primitive>
-        <meta_attributes>
-            <nvpair name="clone-max" value="2"/>
-            <nvpair name="clone-node-max" value="3"/>
-        </meta_attributes>
-    </clone>
-    <clone id="clone4">
-        <primitive id="prim6">
-        </primitive>
-        <meta_attributes>
-            <nvpair name="globally-unique" value="true"/>
-            <nvpair name="clone-max" value="9"/>
-        </meta_attributes>
-    </clone>
-    <clone id="clone5">
-        <primitive id="prim7">
-        </primitive>
-        <meta_attributes>
-            <nvpair name="globally-unique" value="true"/>
-            <nvpair name="clone-max" value="9"/>
-            <nvpair name="clone-node-max" value="2"/>
-        </meta_attributes>
-    </clone>
-    <clone id="clone6">
-        <primitive id="prim8">
-        </primitive>
-        <meta_attributes>
-            <nvpair name="globally-unique" value="true"/>
-            <nvpair name="clone-max" value="9"/>
-            <nvpair name="clone-node-max" value="4"/>
-        </meta_attributes>
-    </clone>
-    <master id="master1">
-        <primitive id="prim9">
-        </primitive>
-    </master>
-    <master id="master2">
-        <primitive id="prim10">
-        </primitive>
-        <meta_attributes>
-            <nvpair name="clone-max" value="9"/>
-            <nvpair name="clone-node-max" value="3"/>
-            <nvpair name="master-max" value="5"/>
-            <nvpair name="master-node-max" value="4"/>
-        </meta_attributes>
-    </master>
-    <master id="master3">
-        <primitive id="prim11">
-        </primitive>
-        <meta_attributes>
-            <nvpair name="globally-unique" value="true"/>
-            <nvpair name="clone-max" value="9"/>
-            <nvpair name="clone-node-max" value="3"/>
-        </meta_attributes>
-    </master>
-    <master id="master4">
-        <primitive id="prim12">
-        </primitive>
-        <meta_attributes>
-            <nvpair name="globally-unique" value="true"/>
-            <nvpair name="clone-max" value="9"/>
-            <nvpair name="clone-node-max" value="3"/>
-            <nvpair name="master-max" value="3"/>
-            <nvpair name="master-node-max" value="2"/>
-        </meta_attributes>
-    </master>
-    <master id="master5">
-        <primitive id="prim13">
-        </primitive>
-        <meta_attributes>
-            <nvpair name="globally-unique" value="true"/>
-            <nvpair name="clone-max" value="9"/>
-            <nvpair name="clone-node-max" value="3"/>
-            <nvpair name="master-max" value="12"/>
-            <nvpair name="master-node-max" value="4"/>
-        </meta_attributes>
-    </master>
-</resources>
-        """).documentElement
-        resources = dom.getElementsByTagName("resources")[0]
-        resources.parentNode.replaceChild(new_resources, resources)
-
-        self.assertEquals(
-            1,
-            utils.count_expected_resource_instances(
-                utils.dom_get_resource(dom, "prim1"), 3
-            )
-        )
-        self.assertEquals(
-            1,
-            utils.count_expected_resource_instances(
-                utils.dom_get_group(dom, "group1"), 3
-            )
-        )
-        self.assertEquals(
-            3,
-            utils.count_expected_resource_instances(
-                utils.dom_get_clone(dom, "clone1"), 3
-            )
-        )
-        self.assertEquals(
-            3,
-            utils.count_expected_resource_instances(
-                utils.dom_get_clone(dom, "clone2"), 3
-            )
-        )
-        self.assertEquals(
-            2,
-            utils.count_expected_resource_instances(
-                utils.dom_get_clone(dom, "clone3"), 3
-            )
-        )
-        self.assertEquals(
-            3,
-            utils.count_expected_resource_instances(
-                utils.dom_get_clone(dom, "clone4"), 3
-            )
-        )
-        self.assertEquals(
-            6,
-            utils.count_expected_resource_instances(
-                utils.dom_get_clone(dom, "clone5"), 3
-            )
-        )
-        self.assertEquals(
-            9,
-            utils.count_expected_resource_instances(
-                utils.dom_get_clone(dom, "clone6"), 3
-            )
-        )
-        self.assertEquals(
-            1,
-            utils.count_expected_resource_instances(
-                utils.dom_get_master(dom, "master1"), 3
-            )
-        )
-        self.assertEquals(
-            3,
-            utils.count_expected_resource_instances(
-                utils.dom_get_master(dom, "master2"), 3
-            )
-        )
-        self.assertEquals(
-            1,
-            utils.count_expected_resource_instances(
-                utils.dom_get_master(dom, "master3"), 3
-            )
-        )
-        self.assertEquals(
-            3,
-            utils.count_expected_resource_instances(
-                utils.dom_get_master(dom, "master4"), 3
-            )
-        )
-        self.assertEquals(
-            9,
-            utils.count_expected_resource_instances(
-                utils.dom_get_master(dom, "master5"), 3
-            )
-        )
-
     def test_parse_cman_quorum_info(self):
         parsed = utils.parse_cman_quorum_info("""\
 Version: 6.2.0
@@ -1269,9 +1006,9 @@ Node addresses: 192.168.122.61
 2 M 2 rh66-node2
 3 M 1 rh66-node3
 """)
-        self.assertEquals(True, parsed["quorate"])
-        self.assertEquals(2, parsed["quorum"])
-        self.assertEquals(
+        self.assertEqual(True, parsed["quorate"])
+        self.assertEqual(2, parsed["quorum"])
+        self.assertEqual(
             [
                 {"name": "rh66-node1", "votes": 3, "local": False},
                 {"name": "rh66-node2", "votes": 2, "local": True},
@@ -1305,9 +1042,9 @@ Node addresses: 192.168.122.61
 2 X 2 rh66-node2
 3 X 1 rh66-node3
 """)
-        self.assertEquals(False, parsed["quorate"])
-        self.assertEquals(2, parsed["quorum"])
-        self.assertEquals(
+        self.assertEqual(False, parsed["quorate"])
+        self.assertEqual(2, parsed["quorum"])
+        self.assertEqual(
             [
                 {"name": "rh66-node1", "votes": 3, "local": True},
             ],
@@ -1315,7 +1052,7 @@ Node addresses: 192.168.122.61
         )
 
         parsed = utils.parse_cman_quorum_info("")
-        self.assertEquals(None, parsed)
+        self.assertEqual(None, parsed)
 
         parsed = utils.parse_cman_quorum_info("""\
 Version: 6.2.0
@@ -1342,7 +1079,7 @@ Node addresses: 192.168.122.61
 2 M 2 rh66-node2
 3 M 1 rh66-node3
 """)
-        self.assertEquals(None, parsed)
+        self.assertEqual(None, parsed)
 
         parsed = utils.parse_cman_quorum_info("""\
 Version: 6.2.0
@@ -1369,7 +1106,7 @@ Node addresses: 192.168.122.61
 2 M 2 rh66-node2
 3 M 1 rh66-node3
 """)
-        self.assertEquals(None, parsed)
+        self.assertEqual(None, parsed)
 
         parsed = utils.parse_cman_quorum_info("""\
 Version: 6.2.0
@@ -1396,7 +1133,7 @@ Node addresses: 192.168.122.61
 2 M Foo rh66-node2
 3 M 1 rh66-node3
 """)
-        self.assertEquals(None, parsed)
+        self.assertEqual(None, parsed)
 
     def test_parse_quorumtool_output(self):
         parsed = utils.parse_quorumtool_output("""\
@@ -1424,9 +1161,9 @@ Membership information
          2          2         NR rh70-node2 (local)
          3          1         NR rh70-node3
 """)
-        self.assertEquals(True, parsed["quorate"])
-        self.assertEquals(2, parsed["quorum"])
-        self.assertEquals(
+        self.assertEqual(True, parsed["quorate"])
+        self.assertEqual(2, parsed["quorum"])
+        self.assertEqual(
             [
                 {"name": "rh70-node1", "votes": 3, "local": False},
                 {"name": "rh70-node2", "votes": 2, "local": True},
@@ -1458,9 +1195,9 @@ Membership information
     Nodeid      Votes    Qdevice Name
              1          1         NR rh70-node1 (local)
 """)
-        self.assertEquals(False, parsed["quorate"])
-        self.assertEquals(2, parsed["quorum"])
-        self.assertEquals(
+        self.assertEqual(False, parsed["quorate"])
+        self.assertEqual(2, parsed["quorum"])
+        self.assertEqual(
             [
                 {"name": "rh70-node1", "votes": 1, "local": True},
             ],
@@ -1468,7 +1205,7 @@ Membership information
         )
 
         parsed = utils.parse_quorumtool_output("")
-        self.assertEquals(None, parsed)
+        self.assertEqual(None, parsed)
 
         parsed = utils.parse_quorumtool_output("""\
 Quorum information
@@ -1495,7 +1232,7 @@ Membership information
          2          1         NR rh70-node2
          3          1         NR rh70-node3
 """)
-        self.assertEquals(None, parsed)
+        self.assertEqual(None, parsed)
 
         parsed = utils.parse_quorumtool_output("""\
 Quorum information
@@ -1522,7 +1259,7 @@ Membership information
          2          1         NR rh70-node2
          3          1         NR rh70-node3
 """)
-        self.assertEquals(None, parsed)
+        self.assertEqual(None, parsed)
 
         parsed = utils.parse_quorumtool_output("""\
 Quorum information
@@ -1549,13 +1286,13 @@ Membership information
          2        foo         NR rh70-node2
          3          1         NR rh70-node3
 """)
-        self.assertEquals(None, parsed)
+        self.assertEqual(None, parsed)
 
     def test_is_node_stop_cause_quorum_loss(self):
         quorum_info = {
             "quorate": False,
         }
-        self.assertEquals(
+        self.assertEqual(
             False,
             utils.is_node_stop_cause_quorum_loss(quorum_info, True)
         )
@@ -1567,7 +1304,7 @@ Membership information
                 {"name": "rh70-node3", "votes": 1, "local": False},
             ],
         }
-        self.assertEquals(
+        self.assertEqual(
             False,
             utils.is_node_stop_cause_quorum_loss(quorum_info, True)
         )
@@ -1579,7 +1316,7 @@ Membership information
                 {"name": "rh70-node3", "votes": 1, "local": True},
             ],
         }
-        self.assertEquals(
+        self.assertEqual(
             True,
             utils.is_node_stop_cause_quorum_loss(quorum_info, True)
         )
@@ -1593,7 +1330,7 @@ Membership information
                 {"name": "rh70-node3", "votes": 1, "local": True},
             ],
         }
-        self.assertEquals(
+        self.assertEqual(
             False,
             utils.is_node_stop_cause_quorum_loss(quorum_info, True)
         )
@@ -1607,7 +1344,7 @@ Membership information
                 {"name": "rh70-node3", "votes": 1, "local": False},
             ],
         }
-        self.assertEquals(
+        self.assertEqual(
             False,
             utils.is_node_stop_cause_quorum_loss(quorum_info, True)
         )
@@ -1621,7 +1358,7 @@ Membership information
                 {"name": "rh70-node3", "votes": 1, "local": False},
             ],
         }
-        self.assertEquals(
+        self.assertEqual(
             True,
             utils.is_node_stop_cause_quorum_loss(quorum_info, True)
         )
@@ -1636,7 +1373,7 @@ Membership information
                 {"name": "rh70-node3", "votes": 1, "local": False},
             ],
         }
-        self.assertEquals(
+        self.assertEqual(
             False,
             utils.is_node_stop_cause_quorum_loss(
                 quorum_info, False, ["rh70-node3"]
@@ -1652,7 +1389,7 @@ Membership information
                 {"name": "rh70-node3", "votes": 1, "local": False},
             ],
         }
-        self.assertEquals(
+        self.assertEqual(
             False,
             utils.is_node_stop_cause_quorum_loss(
                 quorum_info, False, ["rh70-node2"]
@@ -1668,7 +1405,7 @@ Membership information
                 {"name": "rh70-node3", "votes": 1, "local": False},
             ],
         }
-        self.assertEquals(
+        self.assertEqual(
             True,
             utils.is_node_stop_cause_quorum_loss(
                 quorum_info, False, ["rh70-node1"]
@@ -1684,7 +1421,7 @@ Membership information
                 {"name": "rh70-node3", "votes": 1, "local": False},
             ],
         }
-        self.assertEquals(
+        self.assertEqual(
             False,
             utils.is_node_stop_cause_quorum_loss(
                 quorum_info, False, ["rh70-node2", "rh70-node3"]
@@ -1700,20 +1437,558 @@ Membership information
                 {"name": "rh70-node3", "votes": 1, "local": False},
             ],
         }
-        self.assertEquals(
+        self.assertEqual(
             True,
             utils.is_node_stop_cause_quorum_loss(
                 quorum_info, False, ["rh70-node2", "rh70-node3"]
             )
         )
 
+    def test_get_operations_from_transitions(self):
+        transitions = utils.parse(os.path.join(currentdir, "transitions01.xml"))
+        self.assertEqual(
+            [
+                {
+                    'id': 'dummy',
+                    'long_id': 'dummy',
+                    'operation': 'stop',
+                    'on_node': 'rh7-3',
+                },
+                {
+                    'id': 'dummy',
+                    'long_id': 'dummy',
+                    'operation': 'start',
+                    'on_node': 'rh7-2',
+                },
+                {
+                    'id': 'd0',
+                    'long_id': 'd0:1',
+                    'operation': 'stop',
+                    'on_node': 'rh7-1',
+                },
+                {
+                    'id': 'd0',
+                    'long_id': 'd0:1',
+                    'operation': 'start',
+                    'on_node': 'rh7-2',
+                },
+                {
+                    'id': 'state',
+                    'long_id': 'state:0',
+                    'operation': 'stop',
+                    'on_node': 'rh7-3',
+                },
+                {
+                    'id': 'state',
+                    'long_id': 'state:0',
+                    'operation': 'start',
+                    'on_node': 'rh7-2',
+                },
+            ],
+            utils.get_operations_from_transitions(transitions)
+        )
+
+        transitions = utils.parse(os.path.join(currentdir, "transitions02.xml"))
+        self.assertEqual(
+            [
+                {
+                    "id": "RemoteNode",
+                    "long_id": "RemoteNode",
+                    "operation": "stop",
+                    "on_node": "virt-143",
+                },
+                {
+                    "id": "RemoteNode",
+                    "long_id": "RemoteNode",
+                    "operation": "migrate_to",
+                    "on_node": "virt-143",
+                },
+                {
+                    "id": "RemoteNode",
+                    "long_id": "RemoteNode",
+                    "operation": "migrate_from",
+                    "on_node": "virt-142",
+                },
+                {
+                    "id": "dummy8",
+                    "long_id": "dummy8",
+                    "operation": "stop",
+                    "on_node": "virt-143",
+                },
+                {
+                    "id": "dummy8",
+                    "long_id": "dummy8",
+                    "operation": "start",
+                    "on_node": "virt-142",
+                }
+            ],
+            utils.get_operations_from_transitions(transitions)
+        )
+
+    def test_get_resources_location_from_operations(self):
+        cib_dom = self.get_cib_resources()
+
+        operations = []
+        self.assertEqual(
+            {},
+            utils.get_resources_location_from_operations(cib_dom, operations)
+        )
+
+        operations = [
+            {
+                "id": "myResource",
+                "long_id": "myResource",
+                "operation": "start",
+                "on_node": "rh7-1",
+            },
+        ]
+        self.assertEqual(
+            {
+                'myResource': {
+                    'id': 'myResource',
+                    'id_for_constraint': 'myResource',
+                    'long_id': 'myResource',
+                    'start_on_node': 'rh7-1',
+                 },
+            },
+            utils.get_resources_location_from_operations(cib_dom, operations)
+        )
+
+        operations = [
+            {
+                "id": "myResource",
+                "long_id": "myResource",
+                "operation": "start",
+                "on_node": "rh7-1",
+            },
+            {
+                "id": "myResource",
+                "long_id": "myResource",
+                "operation": "start",
+                "on_node": "rh7-2",
+            },
+            {
+                "id": "myResource",
+                "long_id": "myResource",
+                "operation": "monitor",
+                "on_node": "rh7-3",
+            },
+            {
+                "id": "myResource",
+                "long_id": "myResource",
+                "operation": "stop",
+                "on_node": "rh7-3",
+            },
+        ]
+        self.assertEqual(
+            {
+                'myResource': {
+                    'id': 'myResource',
+                    'id_for_constraint': 'myResource',
+                    'long_id': 'myResource',
+                    'start_on_node': 'rh7-2',
+                 },
+            },
+            utils.get_resources_location_from_operations(cib_dom, operations)
+        )
+
+        operations = [
+            {
+                "id": "myResource",
+                "long_id": "myResource",
+                "operation": "start",
+                "on_node": "rh7-1",
+            },
+            {
+                "id": "myClonedResource",
+                "long_id": "myClonedResource:0",
+                "operation": "start",
+                "on_node": "rh7-1",
+            },
+            {
+                "id": "myClonedResource",
+                "long_id": "myClonedResource:0",
+                "operation": "start",
+                "on_node": "rh7-2",
+            },
+            {
+                "id": "myClonedResource",
+                "long_id": "myClonedResource:1",
+                "operation": "start",
+                "on_node": "rh7-3",
+            },
+        ]
+        self.assertEqual(
+            {
+                'myResource': {
+                    'id': 'myResource',
+                    'id_for_constraint': 'myResource',
+                    'long_id': 'myResource',
+                    'start_on_node': 'rh7-1',
+                 },
+                'myClonedResource:0': {
+                    'id': 'myClonedResource',
+                    'id_for_constraint': 'myClone',
+                    'long_id': 'myClonedResource:0',
+                    'start_on_node': 'rh7-2',
+                 },
+                'myClonedResource:1': {
+                    'id': 'myClonedResource',
+                    'id_for_constraint': 'myClone',
+                    'long_id': 'myClonedResource:1',
+                    'start_on_node': 'rh7-3',
+                 },
+            },
+            utils.get_resources_location_from_operations(cib_dom, operations)
+        )
+
+        operations = [
+            {
+                "id": "myUniqueClonedResource:0",
+                "long_id": "myUniqueClonedResource:0",
+                "operation": "start",
+                "on_node": "rh7-1",
+            },
+            {
+                "id": "myUniqueClonedResource:1",
+                "long_id": "myUniqueClonedResource:1",
+                "operation": "monitor",
+                "on_node": "rh7-2",
+            },
+            {
+                "id": "myUniqueClonedResource:2",
+                "long_id": "myUniqueClonedResource:2",
+                "operation": "start",
+                "on_node": "rh7-3",
+            },
+        ]
+        self.assertEqual(
+            {
+                'myUniqueClonedResource:0': {
+                    'id': 'myUniqueClonedResource:0',
+                    'id_for_constraint': 'myUniqueClone',
+                    'long_id': 'myUniqueClonedResource:0',
+                    'start_on_node': 'rh7-1',
+                 },
+                'myUniqueClonedResource:2': {
+                    'id': 'myUniqueClonedResource:2',
+                    'id_for_constraint': 'myUniqueClone',
+                    'long_id': 'myUniqueClonedResource:2',
+                    'start_on_node': 'rh7-3',
+                 },
+            },
+            utils.get_resources_location_from_operations(cib_dom, operations)
+        )
+
+        operations = [
+            {
+                "id": "myMasteredGroupedResource",
+                "long_id": "myMasteredGroupedResource:0",
+                "operation": "start",
+                "on_node": "rh7-1",
+            },
+            {
+                "id": "myMasteredGroupedResource",
+                "long_id": "myMasteredGroupedResource:1",
+                "operation": "demote",
+                "on_node": "rh7-2",
+            },
+            {
+                "id": "myMasteredGroupedResource",
+                "long_id": "myMasteredGroupedResource:1",
+                "operation": "promote",
+                "on_node": "rh7-3",
+            },
+        ]
+        self.assertEqual(
+            {
+                'myMasteredGroupedResource:0': {
+                    'id': 'myMasteredGroupedResource',
+                    'id_for_constraint': 'myGroupMaster',
+                    'long_id': 'myMasteredGroupedResource:0',
+                    'start_on_node': 'rh7-1',
+                 },
+                'myMasteredGroupedResource:1': {
+                    'id': 'myMasteredGroupedResource',
+                    'id_for_constraint': 'myGroupMaster',
+                    'long_id': 'myMasteredGroupedResource:1',
+                    'promote_on_node': 'rh7-3',
+                 },
+            },
+            utils.get_resources_location_from_operations(cib_dom, operations)
+        )
+
+        operations = [
+            {
+                "id": "myResource",
+                "long_id": "myResource",
+                "operation": "stop",
+                "on_node": "rh7-1",
+            },
+            {
+                "id": "myResource",
+                "long_id": "myResource",
+                "operation": "migrate_to",
+                "on_node": "rh7-1",
+            },
+            {
+                "id": "myResource",
+                "long_id": "myResource",
+                "operation": "migrate_from",
+                "on_node": "rh7-2",
+            },
+        ]
+        self.assertEqual(
+            {
+                "myResource": {
+                    "id": "myResource",
+                    "id_for_constraint": "myResource",
+                    "long_id": "myResource",
+                    "start_on_node": "rh7-2",
+                },
+            },
+            utils.get_resources_location_from_operations(cib_dom, operations)
+        )
+
+    def test_is_int(self):
+        self.assertTrue(utils.is_int("-999"))
+        self.assertTrue(utils.is_int("-1"))
+        self.assertTrue(utils.is_int("0"))
+        self.assertTrue(utils.is_int("1"))
+        self.assertTrue(utils.is_int("99999"))
+        self.assertTrue(utils.is_int(" 99999  "))
+        self.assertFalse(utils.is_int("0.0"))
+        self.assertFalse(utils.is_int("-1.0"))
+        self.assertFalse(utils.is_int("-0.1"))
+        self.assertFalse(utils.is_int("0.001"))
+        self.assertFalse(utils.is_int("-999999.1"))
+        self.assertFalse(utils.is_int("0.0001"))
+        self.assertFalse(utils.is_int(""))
+        self.assertFalse(utils.is_int("   "))
+        self.assertFalse(utils.is_int("A"))
+        self.assertFalse(utils.is_int("random 15 47 text  "))
+
+    def test_dom_get_node(self):
+        cib = self.get_cib_with_nodes_minidom()
+        #assertIsNone is not supported in python 2.6
+        self.assertTrue(utils.dom_get_node(cib, "non-existing-node") is None)
+        node = utils.dom_get_node(cib, "rh7-1")
+        self.assertEqual(node.getAttribute("uname"), "rh7-1")
+        self.assertEqual(node.getAttribute("id"), "1")
+
+    def test_dom_prepare_child_element(self):
+        cib = self.get_cib_with_nodes_minidom()
+        node = cib.getElementsByTagName("node")[0]
+        self.assertEqual(len(get_child_elemets(node)), 0)
+        child = utils.dom_prepare_child_element(node, "utilization", "rh7-1-")
+        self.assertEqual(len(get_child_elemets(node)), 1)
+        self.assertEqual(child, get_child_elemets(node)[0])
+        self.assertEqual(get_child_elemets(node)[0].tagName, "utilization")
+        self.assertEqual(
+            get_child_elemets(node)[0].getAttribute("id"), "rh7-1-utilization"
+        )
+        child2 = utils.dom_prepare_child_element(node, "utilization", "rh7-1-")
+        self.assertEqual(len(get_child_elemets(node)), 1)
+        self.assertEqual(child, child2)
+
+    def test_dom_update_nv_pair_add(self):
+        nv_set = xml.dom.minidom.parseString("<nvset/>").documentElement
+        utils.dom_update_nv_pair(nv_set, "test_name", "test_val", "prefix-")
+        self.assertEqual(len(get_child_elemets(nv_set)), 1)
+        pair = get_child_elemets(nv_set)[0]
+        self.assertEqual(pair.getAttribute("name"), "test_name")
+        self.assertEqual(pair.getAttribute("value"), "test_val")
+        self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
+        utils.dom_update_nv_pair(nv_set, "another_name", "value", "prefix2-")
+        self.assertEqual(len(get_child_elemets(nv_set)), 2)
+        self.assertEqual(pair, get_child_elemets(nv_set)[0])
+        pair = get_child_elemets(nv_set)[1]
+        self.assertEqual(pair.getAttribute("name"), "another_name")
+        self.assertEqual(pair.getAttribute("value"), "value")
+        self.assertEqual(pair.getAttribute("id"), "prefix2-another_name")
+
+    def test_dom_update_nv_pair_update(self):
+        nv_set = xml.dom.minidom.parseString("""
+        <nv_set>
+            <nvpair id="prefix-test_name" name="test_name" value="test_val"/>
+            <nvpair id="prefix2-another_name" name="another_name" value="value"/>
+        </nv_set>
+        """).documentElement
+        utils.dom_update_nv_pair(nv_set, "test_name", "new_value")
+        self.assertEqual(len(get_child_elemets(nv_set)), 2)
+        pair1 = get_child_elemets(nv_set)[0]
+        pair2 = get_child_elemets(nv_set)[1]
+        self.assertEqual(pair1.getAttribute("name"), "test_name")
+        self.assertEqual(pair1.getAttribute("value"), "new_value")
+        self.assertEqual(pair1.getAttribute("id"), "prefix-test_name")
+        self.assertEqual(pair2.getAttribute("name"), "another_name")
+        self.assertEqual(pair2.getAttribute("value"), "value")
+        self.assertEqual(pair2.getAttribute("id"), "prefix2-another_name")
+
+    def test_dom_update_nv_pair_remove(self):
+        nv_set = xml.dom.minidom.parseString("""
+        <nv_set>
+            <nvpair id="prefix-test_name" name="test_name" value="test_val"/>
+            <nvpair id="prefix2-another_name" name="another_name" value="value"/>
+        </nv_set>
+        """).documentElement
+        utils.dom_update_nv_pair(nv_set, "non_existing_name", "")
+        self.assertEqual(len(get_child_elemets(nv_set)), 2)
+        utils.dom_update_nv_pair(nv_set, "another_name", "")
+        self.assertEqual(len(get_child_elemets(nv_set)), 1)
+        pair = get_child_elemets(nv_set)[0]
+        self.assertEqual(pair.getAttribute("name"), "test_name")
+        self.assertEqual(pair.getAttribute("value"), "test_val")
+        self.assertEqual(pair.getAttribute("id"), "prefix-test_name")
+        utils.dom_update_nv_pair(nv_set, "test_name", "")
+        self.assertEqual(len(get_child_elemets(nv_set)), 0)
+
+    def test_convert_args_to_tuples(self):
+        out = utils.convert_args_to_tuples(
+            ["invalid_string", "key=value", "key2=val=ue", "k e y= v a l u e "]
+        )
+        self.assertEqual(
+            out,
+            [("key", "value"), ("key2", "val=ue"), ("k e y", " v a l u e ")]
+        )
+
+    def test_dom_update_utilization_invalid(self):
+        el = xml.dom.minidom.parseString("""
+        <resource id="test_id"/>
+        """).documentElement
+        self.assertRaises(
+            SystemExit,
+            utils.dom_update_utilization, el, [("name", "invalid_val")]
+        )
+
+        self.assertRaises(
+            SystemExit,
+            utils.dom_update_utilization, el, [("name", "0.01")]
+        )
+
+    def test_dom_update_utilization_add(self):
+        el = xml.dom.minidom.parseString("""
+        <resource id="test_id"/>
+        """).documentElement
+        utils.dom_update_utilization(
+            el, [("name", ""), ("key", "-1"), ("keys", "90")]
+        )
+
+        self.assertEqual(len(get_child_elemets(el)), 1)
+        u = get_child_elemets(el)[0]
+        self.assertEqual(u.tagName, "utilization")
+        self.assertEqual(u.getAttribute("id"), "test_id-utilization")
+        self.assertEqual(len(get_child_elemets(u)), 2)
+
+        self.assertEqual(
+            get_child_elemets(u)[0].getAttribute("id"), "test_id-utilization-key"
+        )
+        self.assertEqual(get_child_elemets(u)[0].getAttribute("name"), "key")
+        self.assertEqual(get_child_elemets(u)[0].getAttribute("value"), "-1")
+        self.assertEqual(
+            get_child_elemets(u)[1].getAttribute("id"), "test_id-utilization-keys"
+        )
+        self.assertEqual(get_child_elemets(u)[1].getAttribute("name"), "keys")
+        self.assertEqual(get_child_elemets(u)[1].getAttribute("value"), "90")
+
+    def test_dom_update_utilization_update_remove(self):
+        el = xml.dom.minidom.parseString("""
+        <resource id="test_id">
+            <utilization id="test_id-utilization">
+                <nvpair id="test_id-utilization-key" name="key" value="-1"/>
+                <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
+            </utilization>
+        </resource>
+        """).documentElement
+        utils.dom_update_utilization(
+            el, [("key", "100"), ("keys", "")]
+        )
+
+        u = get_child_elemets(el)[0]
+        self.assertEqual(len(get_child_elemets(u)), 1)
+        self.assertEqual(
+            get_child_elemets(u)[0].getAttribute("id"), "test_id-utilization-key"
+        )
+        self.assertEqual(get_child_elemets(u)[0].getAttribute("name"), "key")
+        self.assertEqual(get_child_elemets(u)[0].getAttribute("value"), "100")
+
+    def test_dom_update_meta_attr_add(self):
+        el = xml.dom.minidom.parseString("""
+        <resource id="test_id"/>
+        """).documentElement
+        utils.dom_update_meta_attr(
+            el, [("name", ""), ("key", "test"), ("key2", "val")]
+        )
+
+        self.assertEqual(len(get_child_elemets(el)), 1)
+        u = get_child_elemets(el)[0]
+        self.assertEqual(u.tagName, "meta_attributes")
+        self.assertEqual(u.getAttribute("id"), "test_id-meta_attributes")
+        self.assertEqual(len(get_child_elemets(u)), 2)
+
+        self.assertEqual(
+            get_child_elemets(u)[0].getAttribute("id"), "test_id-meta_attributes-key"
+        )
+        self.assertEqual(get_child_elemets(u)[0].getAttribute("name"), "key")
+        self.assertEqual(get_child_elemets(u)[0].getAttribute("value"), "test")
+        self.assertEqual(
+            get_child_elemets(u)[1].getAttribute("id"), "test_id-meta_attributes-key2"
+        )
+        self.assertEqual(get_child_elemets(u)[1].getAttribute("name"), "key2")
+        self.assertEqual(get_child_elemets(u)[1].getAttribute("value"), "val")
+
+    def test_dom_update_meta_attr_update_remove(self):
+        el = xml.dom.minidom.parseString("""
+        <resource id="test_id">
+            <meta_attributes id="test_id-utilization">
+                <nvpair id="test_id-meta_attributes-key" name="key" value="test"/>
+                <nvpair id="test_id-meta_attributes-key2" name="key2" value="val"/>
+            </meta_attributes>
+        </resource>
+        """).documentElement
+        utils.dom_update_meta_attr(
+            el, [("key", "another_val"), ("key2", "")]
+        )
+
+        u = get_child_elemets(el)[0]
+        self.assertEqual(len(get_child_elemets(u)), 1)
+        self.assertEqual(
+            get_child_elemets(u)[0].getAttribute("id"), "test_id-meta_attributes-key"
+        )
+        self.assertEqual(get_child_elemets(u)[0].getAttribute("name"), "key")
+        self.assertEqual(get_child_elemets(u)[0].getAttribute("value"), "another_val")
+
+    def test_get_utilization(self):
+        el = xml.dom.minidom.parseString("""
+        <resource id="test_id">
+            <utilization id="test_id-utilization">
+                <nvpair id="test_id-utilization-key" name="key" value="-1"/>
+                <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
+            </utilization>
+        </resource>
+        """).documentElement
+        self.assertEqual({"key": "-1", "keys": "90"}, utils.get_utilization(el))
+
+    def test_get_utilization_str(self):
+        el = xml.dom.minidom.parseString("""
+        <resource id="test_id">
+            <utilization id="test_id-utilization">
+                <nvpair id="test_id-utilization-key" name="key" value="-1"/>
+                <nvpair id="test_id-utilization-keys" name="keys" value="90"/>
+            </utilization>
+        </resource>
+        """).documentElement
+        self.assertEqual("key=-1 keys=90", utils.get_utilization_str(el))
+
     def assert_element_id(self, node, node_id):
         self.assertTrue(
             isinstance(node, xml.dom.minidom.Element),
             "element with id '%s' not found" % node_id
         )
-        self.assertEquals(node.getAttribute("id"), node_id)
+        self.assertEqual(node.getAttribute("id"), node_id)
 
+def get_child_elemets(el):
+    return [e for e in el.childNodes if e.nodeType == xml.dom.minidom.Node.ELEMENT_NODE]
 
 if __name__ == "__main__":
     unittest.main()
diff --git a/pcs/test/transitions01.xml b/pcs/test/transitions01.xml
new file mode 100644
index 0000000..54c33a3
--- /dev/null
+++ b/pcs/test/transitions01.xml
@@ -0,0 +1,296 @@
+<transition_graph cluster-delay="60s" stonith-timeout="60s" failed-stop-offset="INFINITY" failed-start-offset="INFINITY" batch-limit="0" transition_id="0">
+  <synapse id="0">
+    <action_set>
+      <rsc_op id="19" operation="monitor" operation_key="dummy_monitor_10000" on_node="rh7-2" on_node_uuid="2">
+        <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="18" operation="start" operation_key="dummy_start_0" on_node="rh7-2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="1">
+    <action_set>
+      <rsc_op id="18" operation="start" operation_key="dummy_start_0" on_node="rh7-2" on_node_uuid="2">
+        <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
+        <attributes CRM_meta_name="start" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="17" operation="stop" operation_key="dummy_stop_0" on_node="rh7-3" on_node_uuid="3"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="2">
+    <action_set>
+      <rsc_op id="17" operation="stop" operation_key="dummy_stop_0" on_node="rh7-3" on_node_uuid="3">
+        <primitive id="dummy" class="ocf" provider="heartbeat" type="Dummy"/>
+        <attributes CRM_meta_name="stop" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="3">
+    <action_set>
+      <rsc_op id="24" operation="monitor" operation_key="d0_monitor_10000" internal_operation_key="d0:1_monitor_10000" on_node="rh7-2" on_node_uuid="2">
+        <primitive id="d0" long-id="d0:1" class="ocf" provider="heartbeat" type="Dummy"/>
+        <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="23" operation="start" operation_key="d0_start_0" internal_operation_key="d0:1_start_0" on_node="rh7-2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="4">
+    <action_set>
+      <rsc_op id="23" operation="start" operation_key="d0_start_0" internal_operation_key="d0:1_start_0" on_node="rh7-2" on_node_uuid="2">
+        <primitive id="d0" long-id="d0:1" class="ocf" provider="heartbeat" type="Dummy"/>
+        <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="22" operation="stop" operation_key="d0_stop_0" internal_operation_key="d0:1_stop_0" on_node="rh7-1" on_node_uuid="1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="25" operation="start" operation_key="d0-clone_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="5">
+    <action_set>
+      <rsc_op id="22" operation="stop" operation_key="d0_stop_0" internal_operation_key="d0:1_stop_0" on_node="rh7-1" on_node_uuid="1">
+        <primitive id="d0" long-id="d0:1" class="ocf" provider="heartbeat" type="Dummy"/>
+        <attributes CRM_meta_clone="1" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="27" operation="stop" operation_key="d0-clone_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="6" priority="1000000">
+    <action_set>
+      <pseudo_event id="28" operation="stopped" operation_key="d0-clone_stopped_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="22" operation="stop" operation_key="d0_stop_0" internal_operation_key="d0:1_stop_0" on_node="rh7-1" on_node_uuid="1"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="27" operation="stop" operation_key="d0-clone_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="7">
+    <action_set>
+      <pseudo_event id="27" operation="stop" operation_key="d0-clone_stop_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="8" priority="1000000">
+    <action_set>
+      <pseudo_event id="26" operation="running" operation_key="d0-clone_running_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="23" operation="start" operation_key="d0_start_0" internal_operation_key="d0:1_start_0" on_node="rh7-2" on_node_uuid="2"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="25" operation="start" operation_key="d0-clone_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="9">
+    <action_set>
+      <pseudo_event id="25" operation="start" operation_key="d0-clone_start_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="28" operation="stopped" operation_key="d0-clone_stopped_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="10">
+    <action_set>
+      <pseudo_event id="35" operation="stopped" operation_key="gr:0_stopped_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="29" operation="stop" operation_key="state_stop_0" internal_operation_key="state:0_stop_0" on_node="rh7-3" on_node_uuid="3"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="34" operation="stop" operation_key="gr:0_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="11">
+    <action_set>
+      <pseudo_event id="34" operation="stop" operation_key="gr:0_stop_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="62" operation="stop" operation_key="gr-master_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="12">
+    <action_set>
+      <pseudo_event id="33" operation="running" operation_key="gr:0_running_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="30" operation="start" operation_key="state_start_0" internal_operation_key="state:0_start_0" on_node="rh7-2" on_node_uuid="2"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="32" operation="start" operation_key="gr:0_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="13">
+    <action_set>
+      <pseudo_event id="32" operation="start" operation_key="gr:0_start_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="35" operation="stopped" operation_key="gr:0_stopped_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="60" operation="start" operation_key="gr-master_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="14">
+    <action_set>
+      <rsc_op id="31" operation="monitor" operation_key="state_monitor_11000" internal_operation_key="state:0_monitor_11000" on_node="rh7-2" on_node_uuid="2">
+        <primitive id="state" long-id="state:0" class="ocf" provider="pacemaker" type="Stateful"/>
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_interval="11000" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="monitor" CRM_meta_notify="false" CRM_meta_role="Slave" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="30" operation="start" operation_key="state_start_0" internal_operation_key="state:0_start_0" on_node="rh7-2" on_node_uuid="2"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="15">
+    <action_set>
+      <rsc_op id="30" operation="start" operation_key="state_start_0" internal_operation_key="state:0_start_0" on_node="rh7-2" on_node_uuid="2">
+        <primitive id="state" long-id="state:0" class="ocf" provider="pacemaker" type="Stateful"/>
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="start" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="29" operation="stop" operation_key="state_stop_0" internal_operation_key="state:0_stop_0" on_node="rh7-3" on_node_uuid="3"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="32" operation="start" operation_key="gr:0_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="16">
+    <action_set>
+      <rsc_op id="29" operation="stop" operation_key="state_stop_0" internal_operation_key="state:0_stop_0" on_node="rh7-3" on_node_uuid="3">
+        <primitive id="state" long-id="state:0" class="ocf" provider="pacemaker" type="Stateful"/>
+        <attributes CRM_meta_clone="0" CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_name="stop" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="34" operation="stop" operation_key="gr:0_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="17" priority="1000000">
+    <action_set>
+      <pseudo_event id="63" operation="stopped" operation_key="gr-master_stopped_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="35" operation="stopped" operation_key="gr:0_stopped_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="62" operation="stop" operation_key="gr-master_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="18">
+    <action_set>
+      <pseudo_event id="62" operation="stop" operation_key="gr-master_stop_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="19" priority="1000000">
+    <action_set>
+      <pseudo_event id="61" operation="running" operation_key="gr-master_running_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="33" operation="running" operation_key="gr:0_running_0"/>
+      </trigger>
+      <trigger>
+        <pseudo_event id="60" operation="start" operation_key="gr-master_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="20">
+    <action_set>
+      <pseudo_event id="60" operation="start" operation_key="gr-master_start_0">
+        <attributes CRM_meta_clone_max="2" CRM_meta_clone_node_max="1" CRM_meta_globally_unique="false" CRM_meta_master_max="1" CRM_meta_master_node_max="1" CRM_meta_notify="false" CRM_meta_timeout="20000" crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="63" operation="stopped" operation_key="gr-master_stopped_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="21">
+    <action_set>
+      <pseudo_event id="8" operation="all_stopped" operation_key="all_stopped">
+        <attributes crm_feature_set="3.0.9"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="17" operation="stop" operation_key="dummy_stop_0" on_node="rh7-3" on_node_uuid="3"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="22" operation="stop" operation_key="d0_stop_0" internal_operation_key="d0:1_stop_0" on_node="rh7-1" on_node_uuid="1"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="29" operation="stop" operation_key="state_stop_0" internal_operation_key="state:0_stop_0" on_node="rh7-3" on_node_uuid="3"/>
+      </trigger>
+    </inputs>
+  </synapse>
+</transition_graph>
diff --git a/pcs/test/transitions02.xml b/pcs/test/transitions02.xml
new file mode 100644
index 0000000..d1e4888
--- /dev/null
+++ b/pcs/test/transitions02.xml
@@ -0,0 +1,116 @@
+<?xml version="1.0" encoding="UTF-8"?>
+<transition_graph batch-limit="0" cluster-delay="60s" failed-start-offset="INFINITY" failed-stop-offset="INFINITY" stonith-timeout="60s" transition_id="0">
+  <synapse id="0">
+    <action_set>
+      <rsc_op id="30" on_node="virt-142" on_node_uuid="1" operation="migrate_from" operation_key="RemoteNode_migrate_from_0">
+        <primitive class="ocf" id="RemoteNode" provider="pacemaker" type="remote"/>
+        <attributes CRM_meta_migrate_source="virt-143" CRM_meta_migrate_target="virt-142" CRM_meta_remote_node="virt-145" CRM_meta_timeout="20000" crm_feature_set="3.0.10" server="virt-145"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="29" on_node="virt-143" on_node_uuid="2" operation="migrate_to" operation_key="RemoteNode_migrate_to_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="1">
+    <action_set>
+      <rsc_op id="29" on_node="virt-143" on_node_uuid="2" operation="migrate_to" operation_key="RemoteNode_migrate_to_0">
+        <primitive class="ocf" id="RemoteNode" provider="pacemaker" type="remote"/>
+        <attributes CRM_meta_migrate_source="virt-143" CRM_meta_migrate_target="virt-142" CRM_meta_remote_node="virt-145" CRM_meta_timeout="20000" crm_feature_set="3.0.10" server="virt-145"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="2">
+    <action_set>
+      <rsc_op id="28" on_node="virt-142" on_node_uuid="1" operation="monitor" operation_key="RemoteNode_monitor_60000">
+        <primitive class="ocf" id="RemoteNode" provider="pacemaker" type="remote"/>
+        <attributes CRM_meta_interval="60000" CRM_meta_name="monitor" CRM_meta_remote_node="virt-145" CRM_meta_timeout="30000" crm_feature_set="3.0.10" server="virt-145"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <pseudo_event id="27" operation="start" operation_key="RemoteNode_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="3">
+    <action_set>
+      <pseudo_event id="27" operation="start" operation_key="RemoteNode_start_0">
+        <attributes CRM_meta_name="start" CRM_meta_remote_node="virt-145" CRM_meta_timeout="60000" crm_feature_set="3.0.10" server="virt-145"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="26" on_node="virt-143" on_node_uuid="2" operation="stop" operation_key="RemoteNode_stop_0"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="30" on_node="virt-142" on_node_uuid="1" operation="migrate_from" operation_key="RemoteNode_migrate_from_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="4">
+    <action_set>
+      <rsc_op id="26" on_node="virt-143" on_node_uuid="2" operation="stop" operation_key="RemoteNode_stop_0">
+        <primitive class="ocf" id="RemoteNode" provider="pacemaker" type="remote"/>
+        <attributes CRM_meta_name="stop" CRM_meta_remote_node="virt-145" CRM_meta_timeout="60000" crm_feature_set="3.0.10" server="virt-145"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="30" on_node="virt-142" on_node_uuid="1" operation="migrate_from" operation_key="RemoteNode_migrate_from_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="5">
+    <action_set>
+      <rsc_op id="47" on_node="virt-142" on_node_uuid="1" operation="monitor" operation_key="dummy8_monitor_10000">
+        <primitive class="ocf" id="dummy8" provider="heartbeat" type="Dummy"/>
+        <attributes CRM_meta_interval="10000" CRM_meta_name="monitor" CRM_meta_timeout="20000" crm_feature_set="3.0.10"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="46" on_node="virt-142" on_node_uuid="1" operation="start" operation_key="dummy8_start_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="6">
+    <action_set>
+      <rsc_op id="46" on_node="virt-142" on_node_uuid="1" operation="start" operation_key="dummy8_start_0">
+        <primitive class="ocf" id="dummy8" provider="heartbeat" type="Dummy"/>
+        <attributes CRM_meta_name="start" CRM_meta_timeout="20000" crm_feature_set="3.0.10"/>
+      </rsc_op>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="45" on_node="virt-143" on_node_uuid="2" operation="stop" operation_key="dummy8_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+  <synapse id="7">
+    <action_set>
+      <rsc_op id="45" on_node="virt-143" on_node_uuid="2" operation="stop" operation_key="dummy8_stop_0">
+        <primitive class="ocf" id="dummy8" provider="heartbeat" type="Dummy"/>
+        <attributes CRM_meta_name="stop" CRM_meta_timeout="20000" crm_feature_set="3.0.10"/>
+      </rsc_op>
+    </action_set>
+    <inputs/>
+  </synapse>
+  <synapse id="8">
+    <action_set>
+      <pseudo_event id="13" operation="all_stopped" operation_key="all_stopped">
+        <attributes crm_feature_set="3.0.10"/>
+      </pseudo_event>
+    </action_set>
+    <inputs>
+      <trigger>
+        <rsc_op id="26" on_node="virt-143" on_node_uuid="2" operation="stop" operation_key="RemoteNode_stop_0"/>
+      </trigger>
+      <trigger>
+        <rsc_op id="45" on_node="virt-143" on_node_uuid="2" operation="stop" operation_key="dummy8_stop_0"/>
+      </trigger>
+    </inputs>
+  </synapse>
+</transition_graph>
\ No newline at end of file
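
The two transition-graph fixtures above are plain Pacemaker XML. As a rough illustration only (not part of the imported source), such a file can be walked with the standard library's ElementTree to list the operations it schedules; the path below assumes the file layout shown in the diff:

    import xml.etree.ElementTree as ET

    # List every resource operation scheduled by a transition graph fixture.
    graph = ET.parse("pcs/test/transitions01.xml").getroot()
    for rsc_op in graph.iter("rsc_op"):
        print(rsc_op.get("operation"), rsc_op.get("operation_key"),
              rsc_op.get("on_node"))
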
diff --git a/pcs/usage.py b/pcs/usage.py
index 1d0dc07..a7bfd84 100644
--- a/pcs/usage.py
+++ b/pcs/usage.py
@@ -1,5 +1,11 @@
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
 import re
 
+
 examples = ""
 def full_usage():
     global examples
@@ -13,8 +19,9 @@ def full_usage():
     out += strip_extras(acl([],False))
     out += strip_extras(status([],False))
     out += strip_extras(config([],False))
-    print out.strip()
-    print "Examples:\n" + examples.replace(" \ ","")
+    out += strip_extras(pcsd([],False))
+    print(out.strip())
+    print("Examples:\n" + examples.replace(" \ ",""))
 
 def strip_extras(text):
     global examples
@@ -88,7 +95,7 @@ def sub_usage(args, output):
 def dict_depth(d, depth=0):
     if not isinstance(d, dict) or not d:
         return depth
-    return max(dict_depth(v, depth+1) for k, v in d.iteritems())
+    return max(dict_depth(v, depth+1) for k, v in d.items())
 
 def sub_gen_code(level,item,prev_level=[],spaces=""):
     out = ""
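
The dict_depth() helper above simply reports the maximum nesting depth of a dict; a minimal, self-contained sketch of its behaviour with a made-up nested dict (not taken from pcs):

    def dict_depth(d, depth=0):
        if not isinstance(d, dict) or not d:
            return depth
        return max(dict_depth(v, depth + 1) for k, v in d.items())

    tree = {"resource": {"create": {}, "delete": {}}, "status": {}}
    print(dict_depth(tree))  # 2 -- "resource" -> "create" is the deepest chain
    print(dict_depth({}))    # 0 -- an empty dict has no depth
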
@@ -130,7 +137,9 @@ def sub_generate_bash_completion():
     tree["constraint"] = generate_tree(constraint([],False))
     tree["status"] = generate_tree(status([],False))
     tree["config"] = generate_tree(config([],False))
-    print """
+    tree["pcsd"] = generate_tree(pcsd([],False))
+    tree["node"] = generate_tree(node([], False))
+    print("""
     _pcs()
     {
     local cur cur1 cur2 cur3
@@ -140,19 +149,19 @@ def sub_generate_bash_completion():
     if [ "$COMP_CWORD" -gt "1" ]; then cur2="${COMP_WORDS[COMP_CWORD-2]}";fi
     if [ "$COMP_CWORD" -gt "2" ]; then cur3="${COMP_WORDS[COMP_CWORD-3]}";fi
 
-    """
-    print sub_gen_code(3,tree,[])
-    print sub_gen_code(2,tree,[])
-    print sub_gen_code(1,tree,[])
-    print """
+    """)
+    print(sub_gen_code(3,tree,[]))
+    print(sub_gen_code(2,tree,[]))
+    print(sub_gen_code(1,tree,[]))
+    print("""
     if [ $COMP_CWORD -eq 1 ]; then
-        COMPREPLY=( $(compgen -W "resource cluster stonith property acl constraint status config" -- $cur) )
+        COMPREPLY=( $(compgen -W "resource cluster stonith property acl constraint status config pcsd node" -- $cur) )
     fi
     return 0
 
     }
     complete -F _pcs pcs
-    """
+    """)
 
 
 def generate_tree(usage_txt):
@@ -169,7 +178,7 @@ def generate_tree(usage_txt):
 
         if ignore == True:
             continue
-        
+
         if re.match("^    \w",l):
             args = l.split()
             arg = args.pop(0)
@@ -204,11 +213,13 @@ Commands:
     acl         Set pacemaker access control lists
     status      View cluster status
     config      View and manage cluster configuration
+    pcsd        Manage pcs daemon
+    node        Manage cluster nodes
 """
 # Advanced usage to possibly add later
 #  --corosync_conf=<corosync file> Specify alternative corosync.conf file
     if pout:
-        print output
+        print(output)
     else:
         return output
                                                     
@@ -247,15 +258,15 @@ Commands:
         resource relatively to some resource already existing in the group.
         If --disabled is specified the resource is not started automatically.
         If --wait is specified, pcs will wait up to 'n' seconds for the resource
-        to start and then return 0 if the resource is started, or 1 if the
-        resource has not yet started. If 'n' is not specified, default resource
-        timeout will be used.
-        Example: pcs resource create VirtualIP ocf:heartbeat:IPaddr2 \\
-                     ip=192.168.0.99 cidr_netmask=32 op monitor interval=30s \\
-                     nic=eth2
-                 Create a new resource called 'VirtualIP' with IP address
-                 192.168.0.99, netmask of 32, monitored everything 30 seconds,
-                 on eth2.
+        to start and then return 0 if the resource is started, or 1 if
+        the resource has not yet started.  If 'n' is not specified it defaults
+        to 60 minutes.
+        Example: Create a new resource called 'VirtualIP' with IP address
+            192.168.0.99, netmask of 32, monitored every 30 seconds,
+            on eth2.
+            pcs resource create VirtualIP ocf:heartbeat:IPaddr2 \\
+                ip=192.168.0.99 cidr_netmask=32 nic=eth2 \\
+                op monitor interval=30s
 
     delete <resource id|group id|master id|clone id>
         Deletes the resource, group, master or clone (and all resources within
@@ -265,16 +276,18 @@ Commands:
         Allow the cluster to start the resource. Depending on the rest of the
         configuration (constraints, options, failures, etc), the resource may
         remain stopped.  If --wait is specified, pcs will wait up to 'n' seconds
-        (or resource timeout seconds) for the resource to start and then return
-        0 if the resource is started, or 1 if the resource has not yet started.
+        for the resource to start and then return 0 if the resource is started,
+        or 1 if the resource has not yet started.  If 'n' is not specified it
+        defaults to 60 minutes.
 
     disable <resource id> [--wait[=n]]
         Attempt to stop the resource if it is running and forbid the cluster
         from starting it again.  Depending on the rest of the configuration
         (constraints, options, failures, etc), the resource may remain
-        started.  If --wait is specified, pcs will wait up to 'n' seconds (or
-        resource timeout seconds) for the resource to stop and then return 0
-        if the resource is stopped or 1 if the resource has not stopped.
+        started.  If --wait is specified, pcs will wait up to 'n' seconds for
+        the resource to stop and then return 0 if the resource is stopped or 1
+        if the resource has not stopped.  If 'n' is not specified it defaults
+        to 60 minutes.
 
     restart <resource id> [node] [--wait=n]
         Restart the resource specified. If a node is specified and if the
@@ -289,38 +302,71 @@ Commands:
         starting the resource.  Using --full will give more detailed output.
         This is mainly used for debugging resources that fail to start.
 
+    debug-stop <resource id> [--full]
+        This command will force the specified resource to stop on this node
+        ignoring the cluster recommendations and print the output from
+        stopping the resource.  Using --full will give more detailed output.
+        This is mainly used for debugging resources that fail to stop.
+
+    debug-promote <resource id> [--full]
+        This command will force the specified resource to be promoted on this
+        node ignoring the cluster recommendations and print the output from
+        promoting the resource.  Using --full will give more detailed output.
+        This is mainly used for debugging resources that fail to promote.
+
+    debug-demote <resource id> [--full]
+        This command will force the specified resource to be demoted on this
+        node ignoring the cluster recommendations and print the output from
+        demoting the resource.  Using --full will give more detailed output.
+        This is mainly used for debugging resources that fail to demote.
+
+    debug-monitor <resource id> [--full]
+        This command will force the specified resource to be monitored on this
+        node ignoring the cluster recommendations and print the output from
+        monitoring the resource.  Using --full will give more detailed output.
+        This is mainly used for debugging resources that fail to be monitored.
+
     move <resource id> [destination node] [--master] [lifetime=<lifetime>]
          [--wait[=n]]
-        Move resource off current node (and optionally onto destination node).
-        If --master is used the scope of the command is limited to the master
-        role and you must use the master id (instead of the resource id).
-        If lifetime is not specified it defaults to infinite.  If --wait is
-        specified, pcs will wait up to 'n' seconds for the resource to start
-        on destination node and then return 0 if the resource is started, or 1
-        if the resource has not yet started.  If 'n' is not specified, default
-        resource timeout will be used.
+        Move the resource off the node it is currently running on by creating a
+        -INFINITY location constraint to ban the node.  If destination node is
+        specified the resource will be moved to that node by creating an
+        INFINITY location constraint to prefer the destination node.  If
+        --master is used the scope of the command is limited to the master role
+        and you must use the master id (instead of the resource id).  If
+        lifetime is specified then the constraint will expire after that time,
+        otherwise it defaults to infinity and the constraint can be cleared
+        manually with 'pcs resource clear' or 'pcs constraint delete'.  If
+        --wait is specified, pcs will wait up to 'n' seconds for the resource
+        to move and then return 0 on success or 1 on error.  If 'n' is not
+        specified it defaults to 60 minutes.
+        If you want the resource to preferably avoid running on some nodes but
+        be able to failover to them use 'pcs location avoids'.
 
     ban <resource id> [node] [--master] [lifetime=<lifetime>] [--wait[=n]]
         Prevent the resource id specified from running on the node (or on the
-        current node it is running on if no node is specified).
-        If --master is used the scope of the command is limited to the
-        master role and you must use the master id (instead of the resource id).
-        If lifetime is not specified it defaults to infinite.  If --wait is
-        specified, pcs will wait up to 'n' seconds for the resource to start
-        on different node and then return 0 if the resource is started, or 1
-        if the resource has not yet started.  If 'n' is not specified, default
-        resource timeout will be used.
-
-    clear <resource id> [node] [--master] [--wait=n]
+        current node it is running on if no node is specified) by creating a
+        -INFINITY location constraint.  If --master is used the scope of the
+        command is limited to the master role and you must use the master id
+        (instead of the resource id).  If lifetime is specified then the
+        constraint will expire after that time, otherwise it defaults to
+        infinity and the constraint can be cleared manually with 'pcs resource
+        clear' or 'pcs constraint delete'.  If --wait is specified, pcs will
+        wait up to 'n' seconds for the resource to move and then return 0
+        on success or 1 on error. If 'n' is not specified it defaults to 60
+        minutes.
+        If you want the resource to preferably avoid running on some nodes but
+        be able to failover to them use 'pcs location avoids'.
+
+    clear <resource id> [node] [--master] [--wait[=n]]
         Remove constraints created by move and/or ban on the specified
         resource (and node if specified).
         If --master is used the scope of the command is limited to the
         master role and you must use the master id (instead of the resource id).
-        If --wait is specified, pcs will wait up to 'n' seconds for resources
-        to start / move depending on the effect of removing the constraints and
-        then return 0 if resources are started on target nodes, or 1 if
-        resources have not yet started / moved.  If clear has no effect, pcs
-        will return 0.
+        If --wait is specified, pcs will wait up to 'n' seconds for the
+        operation to finish (including starting and/or moving resources if
+        appropriate) and then return 0 on success or 1 on error.  If 'n' is not
+        specified it defaults to 60 minutes.
 
     standards
         List available resource agent standards supported by this installation.
@@ -338,13 +384,12 @@ Commands:
         resource.  If an operation (op) is specified it will update the first
         found operation with the same action on the specified resource, if no
         operation with that action exists then a new operation will be created.
-        (WARNING: all current options on the update op will be reset if not
-        specified) If you want to create multiple monitor operations you should
-        use the add_operation & remove_operation commands.  If --wait is
+        (WARNING: all existing options on the updated operation will be reset
+        if not specified.)  If you want to create multiple monitor operations
+        you should use the 'op add' & 'op remove' commands.  If --wait is
         specified, pcs will wait up to 'n' seconds for the changes to take
         effect and then return 0 if the changes have been processed or 1
-        otherwise.  If 'n' is not specified, default resource timeout will
-        be used.
+        otherwise.  If 'n' is not specified it defaults to 60 minutes.
 
     op add <resource id> <operation action> [operation properties]
         Add operation for specified resource
@@ -367,8 +412,7 @@ Commands:
         may be removed by setting an option without a value.  If --wait is
         specified, pcs will wait up to 'n' seconds for the changes to take
         effect and then return 0 if the changes have been processed or 1
-        otherwise.  If 'n' is not specified, default resource timeout will
-        be used.
+        otherwise.  If 'n' is not specified it defaults to 60 minutes.
         Example: pcs resource meta TestResource failure-timeout=50 stickiness=
 
     group add <group name> <resource id> [resource id] ... [resource id]
@@ -378,51 +422,47 @@ Commands:
         to the new group.  You can use --before or --after to specify
         the position of the added resources relatively to some resource already
         existing in the group.  If --wait is specified, pcs will wait up to 'n'
-        seconds for resources to move depending on the effect of grouping and
-        then return 0 if the resources are moved, or 1 if the resources have not
-        yet moved.  If 'n' is not specified, default resource timeout will
-        be used.
+        seconds for the operation to finish (including moving resources if
+        appropriate) and then return 0 on success or 1 on error.  If 'n' is not
+        specified it defaults to 60 minutes.
 
     group remove <group name> <resource id> [resource id] ... [resource id]
           [--wait[=n]]
         Remove the specified resource(s) from the group, removing the group if
         no resources remain.  If --wait is specified, pcs will wait up to 'n'
-        seconds for specified resources to move depending of the effect
-        of ungrouping and the return 0 if resources are moved to target nodes,
-        or 1 if resources have not yet moved.  If 'n' is not specified, default
-        resource timeout will be used.
+        seconds for the operation to finish (including moving resources if
+        appropriate) and then return 0 on success or 1 on error.  If 'n' is not
+        specified it defaults to 60 minutes.
 
     ungroup <group name> [resource id] ... [resource id] [--wait[=n]]
         Remove the group (Note: this does not remove any resources from the
         cluster) or if resources are specified, remove the specified resources
         from the group.  If --wait is specified, pcs will wait up to 'n' seconds
-        for specified resources (all group resources if no resource specified)
-        to move depending of the effect of ungrouping and the return 0 if
-        resources are moved to target nodes, or 1 if resources have not yet
-        moved.  If 'n' is not specified, default resource timeout will be used.
+        for the operation to finish (including moving resources if appropriate)
+        and then return 0 on success or 1 on error.  If 'n' is not specified it
+        defaults to 60 minutes.
 
     clone <resource id | group id> [clone options]... [--wait[=n]]
         Set up the specified resource or group as a clone.  If --wait is
-        specified, pcs will wait up to 'n' seconds for the resource clones
-        to start and then return 0 if the clones are started, or 1 if
-        the clones has not yet started.  If 'n' is not specified, default
-        resource timeout will be used.
+        specified, pcs will wait up to 'n' seconds for the operation to finish
+        (including starting clone instances if appropriate) and then return 0
+        on success or 1 on error.  If 'n' is not specified it defaults to 60
+        minutes.
 
     unclone <resource id | group name> [--wait[=n]]
         Remove the clone which contains the specified group or resource (the
         resource or group will not be removed).  If --wait is specified, pcs
-        will wait up to 'n' seconds for the resource clones to stop and then
-        return 0 if the resource is running as one instance, or 1 if
-        the resource clones has not yet stopped.  If 'n' is not specified,
-        default resource timeout will be used.
+        will wait up to 'n' seconds for the operation to finish (including
+        stopping clone instances if appropriate) and then return 0 on success
+        or 1 on error.  If 'n' is not specified it defaults to 60 minutes.
 
     master [<master/slave name>] <resource id | group name> [options]
            [--wait[=n]]
         Configure a resource or group as a multi-state (master/slave) resource.
-        If --wait is specified, pcs will wait up to 'n' seconds for the resource
-        to be promoted and then return 0 if the resource is promoted, or 1 if
-        the resource has not yet been promoted.  If 'n' is not specified,
-        default resource timeout will be used.
+        If --wait is specified, pcs will wait up to 'n' seconds for the operation
+        to finish (including starting and promoting resource instances if
+        appropriate) and then return 0 on success or 1 on error.  If 'n' is not
+        specified it defaults to 60 minutes.
         Note: to remove a master you must remove the resource/group it contains.
 
     manage <resource id> ... [resource n]
@@ -453,6 +493,35 @@ Commands:
         a resource has failed in the past.  This may allow the resource to
         be started or moved to a more preferred location.
 
+    relocate dry-run [resource1] [resource2] ...
+        The same as 'relocate run' but has no effect on the cluster.
+
+    relocate run [resource1] [resource2] ...
+        Relocate specified resources to their preferred nodes.  If no resources
+        are specified, relocate all resources.
+        This command calculates the preferred node for each resource while
+        ignoring resource stickiness.  Then it creates location constraints
+        which will cause the resources to move to their preferred nodes.  Once
+        the resources have been moved the constraints are deleted automatically.
+        Note that the preferred node is calculated based on current cluster
+        status, constraints, location of resources and other settings and thus
+        it might change over time.
+
+    relocate show
+        Display current status of resources and their optimal node ignoring
+        resource stickiness.
+
+    relocate clear
+        Remove all constraints created by the 'relocate run' command.
+
+    utilization [<resource id> [<name>=<value> ...]]
+        Add specified utilization options to specified resource. If resource is
+        not specified, shows utilization of all resources. If utilization
+        options are not specified, shows utilization of specified resource.
+        Utilization options should be in the format name=value; the value has
+        to be an integer. Options may be removed by setting an option without
+        a value.
+        Example: pcs resource utilization TestResource cpu= ram=20
+
 Examples:
 
     pcs resource show
@@ -487,7 +556,7 @@ Notes:
 
 """
     if pout:
-        print sub_usage(args, output)
+        print(sub_usage(args, output))
     else:
         return output
 
@@ -517,14 +586,15 @@ Commands:
             [--ipv6] [--token <timeout>] [--token_coefficient <timeout>]
             [--join <timeout>] [--consensus <timeout>] [--miss_count_const <count>]
             [--fail_recv_const <failures>]
-        Configure corosync and sync configuration out to listed nodes
-        --local will only perform changes on the local node
-        --start will also start the cluster on the specified nodes
-        --enable will enable corosync and pacemaker on node startup
-        --transport allows specification of corosync transport (default: udpu)
+        Configure corosync and sync configuration out to listed nodes.
+        --local will only perform changes on the local node,
+        --start will also start the cluster on the specified nodes,
+        --enable will enable corosync and pacemaker on node startup,
+        --transport allows specification of corosync transport (default: udpu;
+            udp for CMAN clusters),
         --rrpmode allows you to set the RRP mode of the system. Currently only
             'passive' is supported or tested (using 'active' is not
-            recommended)
+            recommended).
         The --wait_for_all, --auto_tie_breaker, --last_man_standing,
         --last_man_standing_window options are all documented in corosync's
         votequorum(5) man page.
@@ -548,8 +618,8 @@ Commands:
 
         Configuring Redundant Ring Protocol (RRP)
 
-        When using udpu (the default) specifying nodes, specify the ring 0
-        address first followed by a ',' and then the ring 1 address.
+        When using udpu and specifying nodes, specify the ring 0 address first
+        followed by a ',' and then the ring 1 address.
 
         Example: pcs cluster setup --name cname nodeA-0,nodeA-1 nodeB-0,nodeB-1
 
@@ -595,7 +665,7 @@ Commands:
         be able to host resources), if no node or options are specified the
         current node will be put into standby mode, if --all is specified all
         nodes will be put into standby mode.
-    
+
     unstandby [<node>] | --all
         Remove node from standby mode (the node specified will now be able to
         host resources), if no node or options are specified the current node
@@ -605,7 +675,7 @@ Commands:
     remote-node add <hostname> <resource id> [options]
         Enables the specified resource as a remote-node resource on the
         specified hostname (hostname should be the same as 'uname -n')
-    
+
     remote-node remove <hostname>
         Disables any resources configured to be remote-node resource on the
         specified hostname (hostname should be the same as 'uname -n')
@@ -617,9 +687,6 @@ Commands:
         Get current status of pcsd on nodes specified, or on all nodes
         configured in corosync.conf if no nodes are specified
 
-    certkey <certificate file> <key file>
-        Load custom certificate and key files for use in pcsd
-
     sync
         Sync corosync configuration to all nodes found from current
         corosync.conf file (cluster.conf on systems running Corosync 1.x)
@@ -643,15 +710,20 @@ Commands:
 
     cib-push <filename> [scope=<scope> | --config]
         Push the raw xml from <filename> to the CIB (Cluster Information Base).
+        You can obtain the CIB by running the 'pcs cluster cib' command, which
+        is the recommended first step when you want to perform the desired
+        modifications (pcs -f <command>) for a one-off push.
         Specify scope to push a specific section of the CIB.  Valid values
         of the scope are: configuration, nodes, resources, constraints,
         crm_config, rsc_defaults, op_defaults.  --config is the same as
         scope=configuration.  Use of --config is recommended.  Do not specify
         a scope if you need to push the whole CIB or be warned in the case
         of outdated CIB.
+        WARNING: the selected scope of the CIB will be overwritten by the
+        current content of the specified file.
 
     cib-upgrade
-        Upgrade the cib to the latest version
+        Upgrade the CIB to conform to the latest version of the document schema
 
     edit [scope=<scope> | --config]
         Edit the cib in the editor specified by the $EDITOR environment
@@ -696,8 +768,8 @@ Commands:
     destroy [--all]
         Permanently destroy the cluster on the current node, killing all
         corosync/pacemaker processes removing all cib files and the
-        corosync.conf file.  Using '--all' will attempt to destroy the
-        cluster on all nodes configure in the corosync.conf file
+        corosync.conf file.  Using --all will attempt to destroy the
+        cluster on all nodes configured in the corosync.conf file.
         WARNING: This command permanently removes any cluster configuration that
         has been created. It is recommended to run 'pcs cluster stop' before
         destroying the cluster.
@@ -705,16 +777,16 @@ Commands:
     verify [-V] [filename]
         Checks the pacemaker configuration (cib) for syntax and common
         conceptual errors.  If no filename is specified the check is
-        performmed on the currently running cluster.  If '-V' is used
+        performed on the currently running cluster.  If -V is used
         more verbose output will be printed
 
     report [--from "YYYY-M-D H:M:S" [--to "YYYY-M-D" H:M:S"]] dest
         Create a tarball containing everything needed when reporting cluster
-        problems.  If '--from' and '--to' are not used, the report will include
-        the past 24 hours
+        problems.  If --from and --to are not used, the report will include
+        the past 24 hours.
 """
     if pout:
-        print sub_usage(args, output)
+        print(sub_usage(args, output))
     else:
         return output
 
@@ -738,11 +810,13 @@ Commands:
         Show options for specified stonith agent
 
     create <stonith id> <stonith device type> [stonith device options]
+           [op <operation action> <operation options> [<operation action>
+           <operation options>]...] [meta <meta options>...]
         Create stonith device with specified type and options
 
     update <stonith id> [stonith device options]
         Add/Change options to specified stonith id
-        
+
     delete <stonith id>
         Remove stonith id from configuration
 
@@ -783,7 +857,10 @@ Commands:
         call to stonith which will turn the node off instead of rebooting it)
 
     confirm <node>
-        Confirm that the host specified is currently down
+        Confirm that the host specified is currently down.  This command
+        should ONLY be used when the node specified has already been
+        confirmed to be down.
+
         WARNING: if this node is not actually down data corruption/cluster
         failure can occur.
 
@@ -791,7 +868,7 @@ Examples:
     pcs stonith create MyStonith fence_virt pcmk_host_list=f1
 """
     if pout:
-        print sub_usage(args, output)
+        print(sub_usage(args, output))
     else:
         return output
 
@@ -802,27 +879,30 @@ Configure pacemaker properties
 
 Commands:
     list|show [<property> | --all | --defaults]
-        List property settings (default: lists configured properties)
+        List property settings (default: lists configured properties).
         If --defaults is specified will show all property defaults, if --all
         is specified, current configured properties will be shown with unset
-        properties and their defaults
+        properties and their defaults.
+        Run 'man pengine' and 'man crmd' to get a description of the properties.
 
     set [--force] [--node <nodename>] <property>=[<value>]
         Set specific pacemaker properties (if the value is blank then the
         property is removed from the configuration).  If a property is not
         recognized by pcs the property will not be created unless the
-        '--force' is used.  If --node is used a node attribute is set on
+        --force is used.  If --node is used a node attribute is set on
         the specified node.
+        Run 'man pengine' and 'man crmd' to get a description of the properties.
 
     unset [--node <nodename>] <property>
         Remove property from configuration (or remove attribute from
         specified node if --node is used).
+        Run 'man pengine' and 'man crmd' to get a description of the properties.
 
 Examples:
     pcs property set stonith-enabled=false
 """
     if pout:
-        print sub_usage(args, output)
+        print(sub_usage(args, output))
     else:
         return output
 
@@ -858,7 +938,7 @@ Commands:
           <expression> and|or <expression>
           ( <expression> )
         where duration options and date spec options are: hours, monthdays,
-        weekdays, yeardays, months, weeks, years, weekyears, moon
+        weekdays, yeardays, months, weeks, years, weekyears, moon.
         If score is omitted it defaults to INFINITY. If id is omitted one is
         generated from the resource id. If resource-discovery is omitted it
         defaults to 'always'.
@@ -868,7 +948,7 @@ Commands:
         location constraints are displayed per resource (default), if 'nodes'
         is specified location constraints are displayed per node.  If specific
         nodes or resources are specified then we only show information about
-        them
+        them.  If --full is specified show the internal constraint id's as well.
 
     location add <id> <resource name> <node> <score> [resource-discovery=<option>]
         Add a location constraint with the appropriate id, resource name,
@@ -879,7 +959,7 @@ Commands:
         node name and score. (For more advanced pacemaker usage)
 
     order show [--full]
-        List all current ordering constraints (if '--full' is specified show
+        List all current ordering constraints (if --full is specified show
         the internal constraint id's as well).
 
     order [action] <resource id> then [action] <resource id> [options]
@@ -887,7 +967,7 @@ Commands:
         demote) and if no action is specified the default action will be
         start.
         Available options are kind=Optional/Mandatory/Serialize,
-        symmetrical=true/false and id=<constraint-id>.
+        symmetrical=true/false, require-all=true/false and id=<constraint-id>.
 
     order set <resource1> <resource2> [resourceN]... [options] [set
               <resourceX> <resourceY> ... [options]]
@@ -902,7 +982,7 @@ Commands:
         Remove resource from any ordering constraint
 
     colocation show [--full]
-        List all current colocation constraints (if '--full' is specified show
+        List all current colocation constraints (if --full is specified show
         the internal constraint id's as well).
 
     colocation add [master|slave] <source resource id> with [master|slave]
@@ -911,8 +991,8 @@ Commands:
         determined <target resource> should run.  Positive values of score
         mean the resources should be run on the same node, negative values
         mean the resources should not be run on the same node.  Specifying
-        'INFINITY' (or '-INFINITY') for the score force <source resource> to
-        run (or not run) with <target resource>. (score defaults to "INFINITY")
+        'INFINITY' (or '-INFINITY') for the score forces <source resource> to
+        run (or not run) with <target resource> (score defaults to "INFINITY").
         A role can be master or slave (if no role is specified, it defaults to
         'started').
 
@@ -956,7 +1036,7 @@ Commands:
         constraint, the constraint will be removed
 """
     if pout:
-        print sub_usage(args, output)
+        print(sub_usage(args, output))
     else:
         return output
 
@@ -1016,7 +1096,7 @@ Commands:
         parenthesis after permissions in 'pcs acl' output)
 """
     if pout:
-        print sub_usage(args, output)
+        print(sub_usage(args, output))
     else:
         return output
 
@@ -1054,7 +1134,7 @@ Commands:
         View xml version of status (output from crm_mon -r -1 -X)
 """
     if pout:
-        print sub_usage(args, output)
+        print(sub_usage(args, output))
     else:
         return output
 
@@ -1096,8 +1176,78 @@ Commands:
         /etc/cluster/cluster.conf will be used.  You can force to create output
         containing either cluster.conf or corosync.conf using the output-format
         option.
+
+    import-cman output=<filename> [input=<filename>] [--interactive]
+            output-format=pcs-commands|pcs-commands-verbose
+        Converts CMAN cluster configuration to a list of pcs commands which
+        recreate the same cluster as a Pacemaker cluster when executed.  Commands
+        will be saved to 'output' file.  For other options see above.
+
+    export pcs-commands|pcs-commands-verbose output=<filename>
+        Creates a list of pcs commands which upon execution recreates
+        the current cluster running on this node.  Commands will be saved
+        to 'output' file.  Use pcs-commands to get a simple list of commands,
+        whereas pcs-commands-verbose creates a list including comments and debug
+        messages.
+"""
+    if pout:
+        print(sub_usage(args, output))
+    else:
+        return output
+
+def pcsd(args=[], pout=True):
+    output = """
+Usage: pcs pcsd [commands]...
+Manage pcs daemon
+
+Commands:
+    certkey <certificate file> <key file>
+        Load custom certificate and key files for use in pcsd.
+
+    sync-certificates
+        Sync pcsd certificates to all nodes found from current corosync.conf
+        file (cluster.conf on systems running Corosync 1.x).  WARNING: This will
+        restart the pcsd daemon on the nodes.
+
+    clear-auth [--local] [--remote]
+        Removes all system tokens which allow pcs/pcsd on the current system to
+        authenticate with remote pcs/pcsd instances and vice-versa.  After this
+        command is run this node will need to be re-authenticated with other
+        nodes (using 'pcs cluster auth').  Using --local only removes tokens
+        used by local pcs (and pcsd if root) to connect to other pcsd instances,
+        using --remote clears authentication tokens used by remote systems to
+        connect to the local pcsd instance.
+"""
+    if pout:
+        print(sub_usage(args, output))
+    else:
+        return output
+
+def node(args=[], pout=True):
+    output = """
+Usage: pcs node <command>
+Manage cluster nodes
+
+Commands:
+    maintenance [--all] | [node]...
+        Put specified node(s) into maintenance mode, if no node or options are
+        specified the current node will be put into maintenance mode, if --all
+        is specified all nodes will be put into maintenance mode.
+
+    unmaintenance [--all] | [node]...
+        Remove node(s) from maintenance mode, if no node or options are
+        specified the current node will be removed from maintenance mode,
+        if --all is specified all nodes will be removed from maintenance mode.
+
+    utilization [<node> [<name>=<value> ...]]
+        Add specified utilization options to specified node. If node is not
+        specified, shows utilization of all nodes. If utilization options are
+        not specified, shows utilization of the specified node. Utilization
+        options should be in the format name=value; the value has to be an
+        integer. Options may be removed by setting an option without a value.
+        Example: pcs node utilization node1 cpu=4 ram=
 """
     if pout:
-        print sub_usage(args, output)
+        print(sub_usage(args, output))
     else:
         return output
diff --git a/pcs/utils.py b/pcs/utils.py
index 653d31e..05afa76 100644
--- a/pcs/utils.py
+++ b/pcs/utils.py
@@ -1,23 +1,62 @@
-import os, subprocess
+from __future__ import absolute_import
+from __future__ import division
+from __future__ import print_function
+from __future__ import unicode_literals
+
+import os
 import sys
-import pcs
+import subprocess
+import ssl
+import inspect
 import xml.dom.minidom
-import urllib,urllib2
-from xml.dom.minidom import parseString,parse
+from xml.dom.minidom import parseString, parse
 import xml.etree.ElementTree as ET
 import re
 import json
 import tempfile
-import settings
-import resource
 import signal
 import time
-import cStringIO
+from io import BytesIO
 import tarfile
-import cluster
-import prop
 import fcntl
+import getpass
+import base64
+try:
+    # python2
+    from urllib import urlencode as urllib_urlencode
+except ImportError:
+    # python3
+    from urllib.parse import urlencode as urllib_urlencode
+try:
+    # python2
+    from urllib2 import (
+        build_opener as urllib_build_opener,
+        install_opener as urllib_install_opener,
+        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
+        HTTPSHandler as urllib_HTTPSHandler,
+        HTTPError as urllib_HTTPError,
+        URLError as urllib_URLError
+    )
+except ImportError:
+    # python3
+    from urllib.request import (
+        build_opener as urllib_build_opener,
+        install_opener as urllib_install_opener,
+        HTTPCookieProcessor as urllib_HTTPCookieProcessor,
+        HTTPSHandler as urllib_HTTPSHandler
+    )
+    from urllib.error import (
+        HTTPError as urllib_HTTPError,
+        URLError as urllib_URLError
+    )
 
+import settings
+import resource
+import cluster
+import corosync_conf as corosync_conf_utils
+
+
+PYTHON2 = sys.version[0] == "2"
 
 # usefile & filename variables are set in pcs module
 usefile = False
@@ -27,6 +66,14 @@ fence_bin = settings.fence_agent_binaries
 
 score_regexp = re.compile(r'^[+-]?((INFINITY)|(\d+))$')
 
+def simple_cache(func):
+    cache = {}
+    def wrapper(*args):
+        if args not in cache:
+            cache[args] = func()
+        return cache[args]
+    return wrapper
+
 def getValidateWithVersion(dom):
     cib = dom.getElementsByTagName("cib")
     if len(cib) != 1:
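
The simple_cache decorator added above memoizes its target: the wrapper keys the cache on *args but always invokes func() with no arguments, so it is only suitable for zero-argument functions. A small usage sketch under that assumption (the decorated function below is hypothetical, not from pcs):

    def simple_cache(func):
        cache = {}
        def wrapper(*args):
            if args not in cache:
                cache[args] = func()
            return cache[args]
        return wrapper

    @simple_cache
    def local_node_name():
        print("computed once")
        return "node1"

    local_node_name()  # prints "computed once", returns "node1"
    local_node_name()  # served from the cache, nothing is printed
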
@@ -58,47 +105,7 @@ def checkStatus(node):
 
 # Check and see if we're authorized (faster than a status check)
 def checkAuthorization(node):
-    out = sendHTTPRequest(node, 'remote/check_auth', None, False, False)
-    return out
-
-def tokenFile():
-    if 'PCS_TOKEN_FILE' in os.environ:
-        return os.environ['PCS_TOKEN_FILE']
-    else:
-        if os.getuid() == 0:
-            return "/var/lib/pcsd/tokens"
-        else:
-            return os.path.expanduser("~/.pcs/tokens")
-
-def updateToken(node,nodes,username,password):
-    count = 0
-    orig_data = {}
-    for n in nodes:
-        orig_data["node-"+str(count)] = n
-        count = count + 1
-    orig_data["username"] = username
-    orig_data["password"] = password
-    if "--local" not in pcs_options and node != os.uname()[1]:
-        orig_data["bidirectional"] = 1
-
-    if "--force" in pcs_options:
-        orig_data["force"] = 1
-
-    data = urllib.urlencode(orig_data)
-    out = sendHTTPRequest(node, 'remote/auth', data, False, False)
-    if out[0] != 0:
-        err("%s: Unable to connect to pcsd: %s" % (node, out[1]), False)
-        return False
-    token = out[1]
-    if token == "":
-        err("%s: Username and/or password is incorrect" % node, False)
-        return False
-
-    tokens = readTokens()
-    tokens[node] = token
-    writeTokens(tokens)
-
-    return True
+    return sendHTTPRequest(node, 'remote/check_auth', None, False, False)
 
 def get_uid_gid_file_name(uid, gid):
     return "pcs-uidgid-%s-%s" % (uid, gid)
@@ -175,58 +182,27 @@ def remove_uid_gid_file(uid,gid):
     return file_removed
 # Returns a dictionary {'nodeA':'tokenA'}
 def readTokens():
-    tokenfile = tokenFile()
     tokens = {}
-    f = None
-    if not os.path.isfile(tokenfile):
-        return tokens
-    try:
-        f = open(tokenfile, "r")
-        fcntl.flock(f.fileno(), fcntl.LOCK_SH)
-        tokens = json.load(f)
-    except:
-        pass
-    finally:
-        if f is not None:
-            fcntl.flock(f.fileno(), fcntl.LOCK_UN)
-            f.close()
+    output, retval = run_pcsdcli("read_tokens")
+    if retval == 0 and output['status'] == 'ok' and output['data']:
+        tokens = output['data']
     return tokens
 
-# Takes a dictionary {'nodeA':'tokenA'}
-def writeTokens(tokens):
-    tokenfile = tokenFile()
-    f = None
-    if not os.path.isfile(tokenfile) and 'PCS_TOKEN_FILE' not in os.environ:
-        if not os.path.exists(os.path.dirname(tokenfile)):
-            os.makedirs(os.path.dirname(tokenfile),0700)
-    try:
-        f = os.fdopen(os.open(tokenfile, os.O_WRONLY | os.O_CREAT, 0600), "w")
-        fcntl.flock(f.fileno(), fcntl.LOCK_EX)
-        f.truncate()
-        json.dump(tokens, f)
-    except Exception as ex:
-        err("Failed to store tokens into file '%s': %s" % (tokenfile, ex.message))
-    finally:
-        if f is not None:
-            fcntl.flock(f.fileno(), fcntl.LOCK_UN)
-            f.close()
-
 # Set the corosync.conf file on the specified node
 def getCorosyncConfig(node):
-    retval, output = sendHTTPRequest(node, 'remote/get_corosync_conf', None, False, False)
-    return retval,output
+    return sendHTTPRequest(node, 'remote/get_corosync_conf', None, False, False)
 
 def setCorosyncConfig(node,config):
     if is_rhel6():
-        data = urllib.urlencode({'cluster_conf':config})
+        data = urllib_urlencode({'cluster_conf':config})
         (status, data) = sendHTTPRequest(node, 'remote/set_cluster_conf', data)
         if status != 0:
-            err("Unable to set cluster.conf")
+            err("Unable to set cluster.conf: {0}".format(data))
     else:
-        data = urllib.urlencode({'corosync_conf':config})
+        data = urllib_urlencode({'corosync_conf':config})
         (status, data) = sendHTTPRequest(node, 'remote/set_corosync_conf', data)
         if status != 0:
-            err("Unable to set corosync config")
+            err("Unable to set corosync config: {0}".format(data))
 
 def startCluster(node, quiet=False):
     return sendHTTPRequest(node, 'remote/cluster_start', None, False, not quiet)
@@ -239,7 +215,7 @@ def stopCluster(node, quiet=False, pacemaker=True, corosync=True, force=True):
         data["component"] = "corosync"
     if force:
         data["force"] = 1
-    data = urllib.urlencode(data)
+    data = urllib_urlencode(data)
     return sendHTTPRequest(node, 'remote/cluster_stop', data, False, not quiet)
 
 def enableCluster(node):
@@ -252,30 +228,39 @@ def destroyCluster(node, quiet=False):
     return sendHTTPRequest(node, 'remote/cluster_destroy', None, not quiet, not quiet)
 
 def restoreConfig(node, tarball_data):
-    data = urllib.urlencode({"tarball": tarball_data})
+    data = urllib_urlencode({"tarball": tarball_data})
     return sendHTTPRequest(node, "remote/config_restore", data, False, True)
 
+def pauseConfigSyncing(node, delay_seconds=300):
+    data = urllib_urlencode({"sync_thread_pause": delay_seconds})
+    return sendHTTPRequest(node, "remote/set_sync_options", data, False, False)
+
+def resumeConfigSyncing(node):
+    data = urllib_urlencode({"sync_thread_resume": 1})
+    return sendHTTPRequest(node, "remote/set_sync_options", data, False, False)
+
 def canAddNodeToCluster(node):
-    retval, output = sendHTTPRequest(node, 'remote/node_available', [], False, False)
+    retval, output = sendHTTPRequest(
+        node, 'remote/node_available', None, False, False
+    )
     if retval == 0:
         try:
             myout = json.loads(output)
             if "notauthorized" in myout and myout["notauthorized"] == "true":
                 return (False, "unable to authenticate to node")
             if "node_available" in myout and myout["node_available"] == True:
-                return (True,"")
+                return (True, "")
             else:
-                return (False,"node is already in a cluster")
+                return (False, "node is already in a cluster")
         except ValueError:
             return (False, "response parsing error")
-
-    return (False,"error checking node availability")
+    return (False, "error checking node availability: {0}".format(output))
 
 def addLocalNode(node, node_to_add, ring1_addr=None):
     options = {'new_nodename': node_to_add}
     if ring1_addr:
         options['new_ring1addr'] = ring1_addr
-    data = urllib.urlencode(options)
+    data = urllib_urlencode(options)
     retval, output = sendHTTPRequest(node, 'remote/add_node', data, False, False)
     if retval == 0:
         try:
@@ -289,7 +274,7 @@ def addLocalNode(node, node_to_add, ring1_addr=None):
         return 1, output
 
 def removeLocalNode(node, node_to_remove, pacemaker_remove=False):
-    data = urllib.urlencode({'remove_nodename':node_to_remove, 'pacemaker_remove':pacemaker_remove})
+    data = urllib_urlencode({'remove_nodename':node_to_remove, 'pacemaker_remove':pacemaker_remove})
     retval, output = sendHTTPRequest(node, 'remote/remove_node', data, False, False)
     if retval == 0:
         try:
@@ -308,74 +293,119 @@ def removeLocalNode(node, node_to_remove, pacemaker_remove=False):
 # 1 = HTTP Error
 # 2 = No response,
 # 3 = Auth Error
+# 4 = Permission denied
 def sendHTTPRequest(host, request, data = None, printResult = True, printSuccess = True):
     url = 'https://' + host + ':2224/' + request
-    opener = urllib2.build_opener(urllib2.HTTPCookieProcessor())
+    # enable self-signed certificates
+    # https://www.python.org/dev/peps/pep-0476/
+    # http://bugs.python.org/issue21308
+    if (
+        hasattr(ssl, "_create_unverified_context")
+        and
+        "context" in inspect.getargspec(urllib_HTTPSHandler.__init__).args
+    ):
+        opener = urllib_build_opener(
+            urllib_HTTPSHandler(context=ssl._create_unverified_context()),
+            urllib_HTTPCookieProcessor()
+        )
+    else:
+        opener = urllib_build_opener(urllib_HTTPCookieProcessor())
+
     tokens = readTokens()
     if "--debug" in pcs_options:
-        print "Sending HTTP Request to: " + url
-        print "Data: " + str(data)
+        print("Sending HTTP Request to: " + url)
+        print("Data: {0}".format(data))
+    # python3 requires data to by bytes not str
+    if data:
+        data = data.encode("utf-8")
+
+    # cookies
+    cookies = []
     if host in tokens:
-        opener.addheaders.append(('Cookie', 'token='+tokens[host]))
-    urllib2.install_opener(opener)
+        cookies.append("token=" + tokens[host])
+    if os.geteuid() == 0:
+        for name in ("CIB_user", "CIB_user_groups"):
+            if name in os.environ and os.environ[name].strip():
+                value = os.environ[name].strip()
+                # Let's be safe about characters in env variables and do base64.
+                # We cannot do it for CIB_user however to be backward compatible
+                # so we at least remove disallowed characters.
+                if "CIB_user" == name:
+                    value = re.sub(r"[^!-~]", "", value).replace(";", "")
+                else:
+                    value = base64.b64encode(value)
+                cookies.append("{0}={1}".format(name, value))
+    if cookies:
+        opener.addheaders.append(('Cookie', ";".join(cookies)))
+
+    # send the request
+    urllib_install_opener(opener)
     try:
         result = opener.open(url,data)
-        html = result.read()
+        # python3 returns bytes not str
+        html = result.read().decode("utf-8")
         if printResult or printSuccess:
-            print host + ": " + html.strip()
+            print(host + ": " + html.strip())
         if "--debug" in pcs_options:
-            print "Response Code: 0"
-            print "--Debug Response Start--\n" + html,
-            print "--Debug Response End--"
+            print("Response Code: 0")
+            print("--Debug Response Start--\n{0}".format(html), end="")
+            print("--Debug Response End--")
+            print()
         return (0,html)
-    except urllib2.HTTPError, e:
+    except urllib_HTTPError as e:
         if "--debug" in pcs_options:
-            print "Response Code: " + str(e.code)
-        if printResult:
-            if e.code == 401:
-                print "Unable to authenticate to %s - (HTTP error: %d), try running 'pcs cluster auth'" % (host,e.code)
-            else:
-                print "Error connecting to %s - (HTTP error: %d)" % (host,e.code)
+            print("Response Code: " + str(e.code))
         if e.code == 401:
-            return (3,"Unable to authenticate to %s - (HTTP error: %d), try running 'pcs cluster auth'" % (host,e.code))
+            output = (
+                3,
+                "Unable to authenticate to {node} - (HTTP error: {code}), try running 'pcs cluster auth'".format(
+                    node=host, code=e.code
+                )
+            )
+        elif e.code == 403:
+            output = (
+                4,
+                "{node}: Permission denied - (HTTP error: {code})".format(
+                    node=host, code=e.code
+                )
+            )
         else:
-            return (1,"Error connecting to %s - (HTTP error: %d)" % (host,e.code))
-    except urllib2.URLError, e:
+            output = (
+                1,
+                "Error connecting to {node} - (HTTP error: {code})".format(
+                    node=host, code=e.code
+                )
+            )
+        if printResult:
+            print(output[1])
+        return output
+    except urllib_URLError as e:
         if "--debug" in pcs_options:
-            print "Response Reason: " + str(e.reason)
+            print("Response Reason: " + str(e.reason))
         if printResult:
-            print "Unable to connect to %s (%s)" % (host, e.reason)
+            print("Unable to connect to %s (%s)" % (host, e.reason))
         return (2,"Unable to connect to %s (%s)" % (host, e.reason))
 
 def getNodesFromCorosyncConf(conf_text=None):
     if is_rhel6():
-        try:
-            dom = (
-                parse(settings.cluster_conf_file) if conf_text is None
-                else parseString(conf_text)
-            )
-        except IOError:
-            err("Unable to open cluster.conf file to get nodes list")
+        dom = getCorosyncConfParsed(text=conf_text)
         return [
             node_el.getAttribute("name")
             for node_el in dom.getElementsByTagName("clusternode")
         ]
 
+    conf_root = getCorosyncConfParsed(text=conf_text)
     nodes = []
-    corosync_conf = getCorosyncConf() if conf_text is None else conf_text
-    lines = corosync_conf.strip().split('\n')
-    preg = re.compile(r'.*ring0_addr: (.*)')
-    for line in lines:
-        match = preg.match(line)
-        if match:
-            nodes.append (match.group(1))
-
+    for nodelist in conf_root.get_sections("nodelist"):
+        for node in nodelist.get_sections("node"):
+            for attr in node.get_attributes("ring0_addr"):
+                nodes.append(attr[1])
     return nodes
 
 def getNodesFromPacemaker():
     ret_nodes = []
     root = get_cib_etree()
-    nodes = root.findall(".//node")
+    nodes = root.findall(str(".//node"))
     for node in nodes:
         ret_nodes.append(node.attrib["uname"])
     ret_nodes.sort()
@@ -393,15 +423,32 @@ def getCorosyncConf(conf=None):
         err("Unable to read %s: %s" % (conf, e.strerror))
     return out
 
+def getCorosyncConfParsed(conf=None, text=None):
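+    # return the parsed cluster.conf (xml dom) on rhel6, otherwise the parsed
+    # corosync.conf structure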
+    conf_text = getCorosyncConf(conf) if text is None else text
+    if is_rhel6():
+        try:
+            return parseString(conf_text)
+        except xml.parsers.expat.ExpatError as e:
+            err("Unable to parse cluster.conf: %s" % e)
+    try:
+        return corosync_conf_utils.parse_string(conf_text)
+    except corosync_conf_utils.CorosyncConfException as e:
+        err("Unable to parse corosync.conf: %s" % e)
+
 def setCorosyncConf(corosync_config, conf_file=None):
-    if conf_file == None:
-        conf_file = settings.corosync_conf_file
+    if not conf_file:
+        if is_rhel6():
+            conf_file = settings.cluster_conf_file
+        else:
+            conf_file = settings.corosync_conf_file
     try:
         f = open(conf_file,'w')
         f.write(corosync_config)
         f.close()
-    except IOError:
-        err("unable to write corosync configuration file, try running as root.")
+    except EnvironmentError as e:
+        err("Unable to write {0}, try running as root.\n{1}".format(
+            conf_file, e.strerror
+        ))
 
 def reloadCorosync():
     if is_rhel6():
@@ -444,7 +491,7 @@ def getCorosyncActiveNodes():
                 mapped_id = new_id
                 break
         if mapped_id == None:
-            print "Error mapping %s" % node
+            print("Error mapping %s" % node)
             continue
         for new_id, status in nodes_status:
             if new_id == mapped_id:
@@ -464,7 +511,8 @@ def addNodeToCorosync(node):
     node0, node1 = parse_multiring_node(node)
     used_node_ids = []
     num_nodes_in_conf = 0
-    for c_node in getNodesFromCorosyncConf():
+    corosync_conf_text = getCorosyncConf()
+    for c_node in getNodesFromCorosyncConf(conf_text=corosync_conf_text):
         if (c_node == node0) or (c_node == node1):
             err("node already exists in corosync.conf")
         num_nodes_in_conf = num_nodes_in_conf + 1
@@ -472,38 +520,22 @@ def addNodeToCorosync(node):
         for c_node in getCorosyncActiveNodes():
             if (c_node == node0) or (c_node == node1):
                 err("Node already exists in running corosync")
-    corosync_conf = getCorosyncConf()
+    corosync_conf = getCorosyncConfParsed(text=corosync_conf_text)
     new_nodeid = getNextNodeID(corosync_conf)
-    nl_re = re.compile(r"nodelist\s*{")
-    results = nl_re.search(corosync_conf)
-    if results:
-        bracket_depth = 1
-        count = results.end()
-        for c in corosync_conf[results.end():]:
-            if c == "}":
-                bracket_depth -= 1
-            if c == "{":
-                bracket_depth += 1
-
-            if bracket_depth == 0:
-                break
-            count += 1
-        new_corosync_conf = corosync_conf[:count]
-        new_corosync_conf += "  node {\n"
-        if node1 is not None:
-            new_corosync_conf += "        ring0_addr: %s\n" % (node0)
-            new_corosync_conf += "        ring1_addr: %s\n" % (node1)
-        else:
-            new_corosync_conf += "        ring0_addr: %s\n" % (node0)
-        new_corosync_conf += "        nodeid: %d\n" % (new_nodeid)
-        new_corosync_conf += "       }\n"
-        new_corosync_conf += corosync_conf[count:]
-        if num_nodes_in_conf >= 2:
-            new_corosync_conf = rmQuorumOption(new_corosync_conf,("two_node","1"))
-        setCorosyncConf(new_corosync_conf)
-    else:
+
+    nodelists = corosync_conf.get_sections("nodelist")
+    if not nodelists:
         err("unable to find nodelist in corosync.conf")
+    nodelist = nodelists[0]
+    new_node = corosync_conf_utils.Section("node")
+    nodelist.add_section(new_node)
+    new_node.add_attribute("ring0_addr", node0)
+    if node1:
+        new_node.add_attribute("ring1_addr", node1)
+    new_node.add_attribute("nodeid", new_nodeid)
 
+    corosync_conf = autoset_2node_corosync(corosync_conf)
+    setCorosyncConf(str(corosync_conf))
     return True
 
 def addNodeToClusterConf(node):
@@ -513,30 +545,30 @@ def addNodeToClusterConf(node):
         if (existing_node == node0) or (existing_node == node1):
             err("node already exists in cluster.conf")
 
-    output, retval = run(["/usr/sbin/ccs", "-f", settings.cluster_conf_file, "--addnode", node0])
+    output, retval = run(["ccs", "-f", settings.cluster_conf_file, "--addnode", node0])
     if retval != 0:
-        print output
+        print(output)
         err("error adding node: %s" % node0)
 
     if node1:
         output, retval = run([
-            "/usr/sbin/ccs", "-f", settings.cluster_conf_file,
+            "ccs", "-f", settings.cluster_conf_file,
             "--addalt", node0, node1
         ])
         if retval != 0:
-            print output
+            print(output)
             err(
                 "error adding alternative address for node: %s" % node0
             )
 
-    output, retval = run(["/usr/sbin/ccs", "-i", "-f", settings.cluster_conf_file, "--addmethod", "pcmk-method", node0])
+    output, retval = run(["ccs", "-i", "-f", settings.cluster_conf_file, "--addmethod", "pcmk-method", node0])
     if retval != 0:
-        print output
+        print(output)
         err("error adding fence method: %s" % node)
 
-    output, retval = run(["/usr/sbin/ccs", "-i", "-f", settings.cluster_conf_file, "--addfenceinst", "pcmk-redirect", node0, "pcmk-method", "port="+node0])
+    output, retval = run(["ccs", "-i", "-f", settings.cluster_conf_file, "--addfenceinst", "pcmk-redirect", node0, "pcmk-method", "port="+node0])
     if retval != 0:
-        print output
+        print(output)
         err("error adding fence instance: %s" % node)
 
     if len(nodes) == 2:
@@ -545,61 +577,34 @@ def addNodeToClusterConf(node):
         cman_options_map.pop("two_node", None)
         cman_options = ["%s=%s" % (n, v) for n, v in cman_options_map.items()]
         output, retval = run(
-            ["/usr/sbin/ccs", "-i", "-f", settings.cluster_conf_file, "--setcman"]
+            ["ccs", "-i", "-f", settings.cluster_conf_file, "--setcman"]
             + cman_options
         )
         if retval != 0:
-            print output
+            print(output)
             err("unable to set cman options")
 
     return True
 
-# TODO: Need to make this smarter about parsing files not generated by pcs
 def removeNodeFromCorosync(node):
     removed_node = False
-    node_found = False
     num_nodes_in_conf = 0
-
     node0, node1 = parse_multiring_node(node)
 
-    for c_node in getNodesFromCorosyncConf():
-        if c_node == node0:
-            node_found = True
-        num_nodes_in_conf = num_nodes_in_conf + 1
-
-    if not node_found:
-        return False
-
-    new_corosync_conf_lines = []
-    in_node = False
-    node_match = False
-    node_buffer = []
-    for line in getCorosyncConf().split("\n"):
-        if in_node:
-            node_buffer.append(line)
-            if (
-                ("ring0_addr: " + node0 in line)
-                or
-                (node1 is not None and "ring0_addr: " + node1 in line)
-            ):
-                node_match = True
-                removed_node = True
-            if "}" in line:
-                if not node_match:
-                    new_corosync_conf_lines.extend(node_buffer)
-                node_buffer = []
-                node_match = False
-        elif "node {" in line:
-            node_buffer.append(line)
-            in_node = True
-        else:
-            new_corosync_conf_lines.append(line)
-    new_corosync_conf = "\n".join(new_corosync_conf_lines) + "\n"
+    corosync_conf = getCorosyncConfParsed()
+    for nodelist in corosync_conf.get_sections("nodelist"):
+        for node in nodelist.get_sections("node"):
+            num_nodes_in_conf += 1
+            ring0_attrs = node.get_attributes("ring0_addr")
+            if ring0_attrs:
+                ring0_conf = ring0_attrs[0][1]
+                if (ring0_conf == node0) or (node1 and ring0_conf == node1):
+                    node.parent.del_section(node)
+                    removed_node = True
 
     if removed_node:
-        if num_nodes_in_conf == 3:
-            new_corosync_conf = addQuorumOption(new_corosync_conf,("two_node","1"))
-        setCorosyncConf(new_corosync_conf)
+        corosync_conf = autoset_2node_corosync(corosync_conf)
+        setCorosyncConf(str(corosync_conf))
 
     return removed_node
 
@@ -609,9 +614,9 @@ def removeNodeFromClusterConf(node):
     if node0 not in nodes:
         return False
 
-    output, retval = run(["/usr/sbin/ccs", "-f", settings.cluster_conf_file, "--rmnode", node0])
+    output, retval = run(["ccs", "-f", settings.cluster_conf_file, "--rmnode", node0])
     if retval != 0:
-        print output
+        print(output)
         err("error removing node: %s" % node)
 
     if len(nodes) == 3:
@@ -620,79 +625,48 @@ def removeNodeFromClusterConf(node):
         cman_options_map.pop("two_node", None)
         cman_options = ["%s=%s" % (n, v) for n, v in cman_options_map.items()]
         output, retval = run(
-            ["/usr/sbin/ccs", "-f", settings.cluster_conf_file, "--setcman"]
+            ["ccs", "-f", settings.cluster_conf_file, "--setcman"]
             + ["two_node=1", "expected_votes=1"]
             + cman_options
         )
         if retval != 0:
-            print output
+            print(output)
             err("unable to set cman options: expected_votes and two_node")
     return True
 
-# Adds an option to the quorum section to the corosync.conf passed in and
-# returns a string containing the updated corosync.conf
-# corosync_conf is a string containing the full corosync.conf 
-# option is a tuple with (option, value)
-def addQuorumOption(corosync_conf,option):
-    lines = corosync_conf.split("\n")
-    newlines = []
-    output = ""
-    done = False
-
-    inQuorum = False
-    for line in lines:
-        if inQuorum and line.startswith(option[0] + ":"):
-            line = option[0] + ": " + option[1]
-            done = True
-        if line.startswith("quorum {"):
-            inQuorum = True
-        newlines.append(line)
-
-    if not done:
-        inQuorum = False
-        for line in newlines:
-            if inQuorum and line.startswith("provider:"):
-                line = line + "\n" + option[0] + ": " + option[1]
-                done = True
-            if line.startswith("quorum {") and not done:
-                inQuorum = True
-            if line.startswith("}") and inQuorum:
-                inQuorum = False
-            if not inQuorum or not line == "":
-                output = output + line + "\n"
-
-    return output.rstrip('\n') + "\n"
-
-# Removes an option in the quorum section of the corosync.conf passed in and
-# returns a string containing the updated corosync.conf
-# corosync_conf is a string containing the full corosync.conf 
-# option is a tuple with (option, value)
-def rmQuorumOption(corosync_conf,option):
-    lines = corosync_conf.split("\n")
-    newlines = []
-    output = ""
-    done = False
-
-    inQuorum = False
-    for line in lines:
-        if inQuorum and line.startswith(option[0] + ":"):
-            continue
-        if line.startswith("quorum {"):
-            inQuorum = True
-        output = output + line + "\n"
-
-    return output.rstrip('\n') + "\n"
+def autoset_2node_corosync(corosync_conf):
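+    # set the two_node quorum option if the cluster consists of exactly two
+    # nodes and auto_tie_breaker is disabled, remove the option otherwise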
+    node_count = 0
+    auto_tie_breaker = False
+
+    for nodelist in corosync_conf.get_sections("nodelist"):
+        node_count += len(nodelist.get_sections("node"))
+    quorum_sections = corosync_conf.get_sections("quorum")
+    for quorum in quorum_sections:
+        for attr in quorum.get_attributes("auto_tie_breaker"):
+            auto_tie_breaker = attr[1] == "1"
+
+    if node_count == 2 and not auto_tie_breaker:
+        for quorum in quorum_sections:
+            quorum.set_attribute("two_node", "1")
+        if not quorum_sections:
+            quorum = corosync_conf_utils.Section("quorum")
+            quorum.add_attribute("two_node", "1")
+            corosync_conf.add_section(quorum)
+    else:
+        for quorum in quorum_sections:
+            quorum.del_attributes_by_name("two_node")
+    return corosync_conf
 
 def getNextNodeID(corosync_conf):
     currentNodes = []
     highest = 0
-    corosync_conf = getCorosyncConf()
-    p = re.compile(r"nodeid:\s*([0-9]+)")
-    mall = p.findall(corosync_conf)
-    for m in mall:
-        currentNodes.append(int(m))
-        if int(m) > highest:
-            highest = int(m)
+    for nodelist in corosync_conf.get_sections("nodelist"):
+        for node in nodelist.get_sections("node"):
+            for attr in node.get_attributes("nodeid"):
+                nodeid = int(attr[1])
+                currentNodes.append(nodeid)
+                if nodeid > highest:
+                    highest = nodeid
 
     cur_test_id = highest
     while cur_test_id >= 1:
@@ -714,13 +688,13 @@ def parse_multiring_node(node):
             % node
         )
 
-def need_ring1_address(corosync_conf):
+def need_ring1_address(corosync_conf_text):
     if is_rhel6():
         # ring1 address is required regardless of transport
         # it has to be added to cluster.conf in order to set up ring1
         # in corosync by cman
         try:
-            dom = parseString(corosync_conf)
+            dom = parseString(corosync_conf_text)
         except xml.parsers.expat.ExpatError as e:
             err("Unable parse cluster.conf: %s" % e)
         rrp = False
@@ -729,23 +703,15 @@ def need_ring1_address(corosync_conf):
                 rrp = True
         return rrp
 
-    line_list = corosync_conf.split("\n")
-    in_totem = False
+    corosync_conf = getCorosyncConfParsed(text=corosync_conf_text)
     udpu_transport = False
     rrp = False
-    for line in line_list:
-        line = line.strip()
-        if in_totem:
-            if ":" in line:
-                name, value = map(lambda x: x.strip(), line.split(":"))
-                if name == "transport" and value == "udpu":
-                    udpu_transport = True
-                if name == "rrp_mode" and value in ["active", "passive"]:
-                    rrp = True
-            if "}" in line:
-                in_totem = False
-        if line.startswith("totem {"):
-            in_totem = True
+    for totem in corosync_conf.get_sections("totem"):
+        for attr in totem.get_attributes():
+            if attr[0] == "transport" and attr[1] == "udpu":
+                udpu_transport = True
+            if attr[0] == "rrp_mode" and attr[1] in ["active", "passive"]:
+                rrp = True
     return udpu_transport and rrp
 
 def is_cman_with_udpu_transport():
@@ -773,8 +739,14 @@ def subprocess_setup():
     signal.signal(signal.SIGPIPE, signal.SIG_DFL)
 
 # Run command, with environment and return (output, retval)
-def run(args, ignore_stderr=False, string_for_stdin=None):
-    env_var = dict(os.environ)
+def run(
+    args, ignore_stderr=False, string_for_stdin=None, env_extend=None,
+    binary_output=False
+):
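+    # env_extend - extra environment variables to run the command with
+    # binary_output - return stdout as bytes instead of str (python3 only)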
+    if not env_extend:
+        env_extend = dict()
+    env_var = env_extend
+    env_var.update(dict(os.environ))
     if usefile:
         env_var["CIB_file"] = filename
 
@@ -787,15 +759,17 @@ def run(args, ignore_stderr=False, string_for_stdin=None):
     command = args[0]
     if command[0:3] == "crm" or command in ["cibadmin", "cman_tool", "iso8601"]:
         args[0] = settings.pacemaker_binaries + command
-    if command[0:8] == "corosync":
+    elif command[0:8] == "corosync":
         args[0] = settings.corosync_binaries + command
-        
+    elif command == "ccs":
+        args[0] = settings.ccs_binaries + command
+
     try:
         if "--debug" in pcs_options:
-            print "Running: " + " ".join(args)
+            print("Running: " + " ".join(args))
             if string_for_stdin:
-                print "--Debug Input Start--\n" + string_for_stdin
-                print "--Debug Input End--\n"
+                print("--Debug Input Start--\n" + string_for_stdin)
+                print("--Debug Input End--")
 
         # Some commands react differently if you give them anything via stdin
         if string_for_stdin != None:
@@ -803,22 +777,114 @@ def run(args, ignore_stderr=False, string_for_stdin=None):
         else:
             stdin_pipe = None
 
-        if ignore_stderr:
-            p = subprocess.Popen(args, stdin=stdin_pipe, stdout=subprocess.PIPE, stderr=subprocess.PIPE, env = env_var, preexec_fn=subprocess_setup)
-        else:
-            p = subprocess.Popen(args, stdin=stdin_pipe, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, env = env_var, preexec_fn=subprocess_setup)
+        p = subprocess.Popen(
+            args,
+            stdin=stdin_pipe,
+            stdout=subprocess.PIPE,
+            stderr=(subprocess.PIPE if ignore_stderr else subprocess.STDOUT),
+            preexec_fn=subprocess_setup,
+            env=env_var,
+            # decodes newlines and in python3 also converts bytes to str
+            universal_newlines=(not PYTHON2 and not binary_output)
+        )
         output,stderror = p.communicate(string_for_stdin)
         returnVal = p.returncode
         if "--debug" in pcs_options:
-            print "Return Value: " + str(returnVal)
-            print "--Debug Output Start--\n" + output
-            print "--Debug Output End--\n"
+            print("Return Value: {0}".format(returnVal))
+            print("--Debug Output Start--\n{0}".format(output), end="")
+            print("--Debug Output End--")
+            print()
     except OSError as e:
-        print e.strerror
+        print(e.strerror)
         err("unable to locate command: " + args[0])
 
     return output, returnVal
 
+def run_pcsdcli(command, data=None):
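+    # run pcsd-cli.rb with data passed as json on its stdin and parse its
+    # json output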
+    if not data:
+        data = dict()
+    env_var = dict()
+    if "--debug" in pcs_options:
+        env_var["PCSD_DEBUG"] = "true"
+    pcs_dir = os.path.realpath(os.path.dirname(sys.argv[0]))
+    if pcs_dir == "/usr/sbin":
+        pcsd_dir_path = settings.pcsd_exec_location
+    else:
+        pcsd_dir_path = os.path.join(pcs_dir, '../pcsd')
+    pcsdcli_path = os.path.join(pcsd_dir_path, 'pcsd-cli.rb')
+    gem_home = os.path.join(pcsd_dir_path, 'vendor/bundle/ruby')
+    env_var["GEM_HOME"] = gem_home
+    output, retval = run(
+        ["/usr/bin/ruby", "-I" + pcsd_dir_path, pcsdcli_path, command],
+        string_for_stdin=json.dumps(data),
+        env_extend=env_var
+    )
+    try:
+        output_json = json.loads(output)
+        for key in ['status', 'text', 'data']:
+            if key not in output_json:
+                output_json[key] = None
+    except ValueError:
+        output_json = {
+            'status': 'bad_json_output',
+            'text': output,
+            'data': None,
+        }
+    return output_json, retval
+
+def call_local_pcsd(argv, interactive_auth=False, std_in=None):
+    # some commands cannot be run under a non-root account
+    # so we pass those commands to locally running pcsd to execute them
+    # returns [list_of_errors, exit_code, stdout, stderr]
+    data = {
+        "command": json.dumps(argv),
+    }
+    if std_in:
+        data['stdin'] = std_in
+    data_send = urllib_urlencode(data)
+    code, output = sendHTTPRequest(
+        "localhost", "run_pcs", data_send, False, False
+    )
+
+    # authenticate against local pcsd and run again
+    if interactive_auth and 3 == code: # not authenticated
+        print('Please authenticate yourself to the local pcsd')
+        username = get_terminal_input('Username: ')
+        password = get_terminal_password()
+        cluster.auth_nodes_do(["localhost"], username, password, True, True)
+        print()
+        code, output = sendHTTPRequest(
+            "localhost", "run_pcs", data_send, False, False
+        )
+
+    if 3 == code: # not authenticated
+        # don't advise to run 'pcs cluster auth' as that is not used to auth
+        # to localhost
+        return [['Unable to authenticate to the local pcsd'], 1, '', '']
+    if 0 != code: # http error connecting to localhost
+        return [[output], 1, '', '']
+
+    try:
+        output_json = json.loads(output)
+        for key in ['status', 'data']:
+            if key not in output_json:
+                output_json[key] = None
+    except ValueError:
+        return [['Unable to communicate with pcsd'], 1, '', '']
+    if output_json['status'] == 'bad_command':
+        return [['Command not allowed'], 1, '', '']
+    if output_json['status'] == 'access_denied':
+        return [['Access denied'], 1, '', '']
+    if output_json['status'] != "ok" or not output_json["data"]:
+        return [['Unable to communicate with pcsd'], 1, '', '']
+    try:
+        exitcode = output_json["data"]["code"]
+        std_out = output_json["data"]["stdout"]
+        std_err = output_json["data"]["stderr"]
+        return [[], exitcode, std_out, std_err]
+    except KeyError:
+        return [['Unable to communicate with pcsd'], 1, '', '']
+
 def map_for_error_list(callab, iterab):
     error_list = []
     for item in iterab:
@@ -833,13 +899,13 @@ def run_node_threads(node_threads):
         thread.daemon = True
         thread.start()
     while node_threads:
-        for node in node_threads.keys():
+        for node in list(node_threads.keys()):
             thread = node_threads[node]
             thread.join(1)
             if thread.is_alive():
                 continue
             output = node + ": " + thread.output.strip()
-            print output
+            print(output)
             if thread.retval != 0:
                 error_list.append(output)
             del node_threads[node]
@@ -886,13 +952,17 @@ def dom_get_clone_ms_resource(dom, clone_ms_id):
         dom_get_master(dom, clone_ms_id)
     )
     if clone_ms:
-        for child in clone_ms.childNodes:
-            if (
-                child.nodeType == xml.dom.minidom.Node.ELEMENT_NODE
-                and
-                child.tagName in ["group", "primitive"]
-            ):
-                return child
+        return dom_elem_get_clone_ms_resource(clone_ms)
+    return None
+
+def dom_elem_get_clone_ms_resource(clone_ms):
+    for child in clone_ms.childNodes:
+        if (
+            child.nodeType == xml.dom.minidom.Node.ELEMENT_NODE
+            and
+            child.tagName in ["group", "primitive"]
+        ):
+            return child
     return None
 
 def dom_get_resource_clone_ms_parent(dom, resource_id):
@@ -901,6 +971,9 @@ def dom_get_resource_clone_ms_parent(dom, resource_id):
         or
         dom_get_group(dom, resource_id)
     )
+    return dom_elem_get_resource_clone_ms_parent(resource)
+
+def dom_elem_get_resource_clone_ms_parent(resource):
     clone = resource
     while True:
         if not isinstance(clone, xml.dom.minidom.Element):
@@ -909,40 +982,24 @@ def dom_get_resource_clone_ms_parent(dom, resource_id):
             return clone
         clone = clone.parentNode
 
-# deprecated, use dom_get_master
-def is_master(ms_id):
-    return does_exist("//master[@id='"+ms_id+"']")
-
 def dom_get_master(dom, master_id):
     for master in dom.getElementsByTagName("master"):
         if master.getAttribute("id") == master_id:
             return master
     return None
 
-# deprecated, use dom_get_clone
-def is_clone(clone_id):
-    return does_exist("//clone[@id='"+clone_id+"']")
-
 def dom_get_clone(dom, clone_id):
     for clone in dom.getElementsByTagName("clone"):
         if clone.getAttribute("id") == clone_id:
             return clone
     return None
 
-# deprecated, use dom_get_group
-def is_group(group_id):
-    return does_exist("//group[@id='"+group_id+"']")
-
 def dom_get_group(dom, group_id):
     for group in dom.getElementsByTagName("group"):
         if group.getAttribute("id") == group_id:
             return group
     return None
 
-# deprecated, use dom_get_group_clone
-def is_group_clone(group_id):
-    return does_exist("//clone//group[@id='"+group_id+"']")
-
 def dom_get_group_clone(dom, group_id):
     for clone in dom.getElementsByTagName("clone"):
         group = dom_get_group(clone, group_id)
@@ -957,23 +1014,26 @@ def dom_get_group_masterslave(dom, group_id):
             return group
     return None
 
-# deprecated, use dom_get_resource
-def is_resource(resource_id):
-    return does_exist("//primitive[@id='"+resource_id+"']")
-
 def dom_get_resource(dom, resource_id):
     for primitive in dom.getElementsByTagName("primitive"):
         if primitive.getAttribute("id") == resource_id:
             return primitive
     return None
 
+def dom_get_any_resource(dom, resource_id):
+    return (
+        dom_get_resource(dom, resource_id)
+        or
+        dom_get_group(dom, resource_id)
+        or
+        dom_get_clone(dom, resource_id)
+        or
+        dom_get_master(dom, resource_id)
+    )
+
 def is_stonith_resource(resource_id):
     return does_exist("//primitive[@id='"+resource_id+"' and @class='stonith']")
 
-# deprecated, use dom_get_resource_clone
-def is_resource_clone(resource_id):
-    return does_exist("//clone//primitive[@id='"+resource_id+"']")
-
 def dom_get_resource_clone(dom, resource_id):
     for clone in dom.getElementsByTagName("clone"):
         resource = dom_get_resource(clone, resource_id)
@@ -981,10 +1041,6 @@ def dom_get_resource_clone(dom, resource_id):
             return resource
     return None
 
-# deprecated, use dom_get_resource_masterslave
-def is_resource_masterslave(resource_id):
-    return does_exist("//master//primitive[@id='"+resource_id+"']")
-
 def dom_get_resource_masterslave(dom, resource_id):
     for master in dom.getElementsByTagName("master"):
         resource = dom_get_resource(master, resource_id)
@@ -992,16 +1048,6 @@ def dom_get_resource_masterslave(dom, resource_id):
             return resource
     return None
 
-# deprecated, use dom_get_resource_clone_ms_parent
-def get_resource_master_id(resource_id):
-    dom = get_cib_dom()
-    primitives = dom.getElementsByTagName("primitive")
-    for p in primitives:
-        if p.getAttribute("id") == resource_id:
-            if p.parentNode.tagName == "master":
-                return p.parentNode.getAttribute("id")
-    return None
-
 # returns tuple (is_valid, error_message, correct_resource_id_if_exists)
 def validate_constraint_resource(dom, resource_id):
     resource_el = (
@@ -1070,6 +1116,12 @@ def dom_get_element_with_id(dom, tag_name, element_id):
             return elem
     return None
 
+def dom_get_node(dom, node_name):
+    for e in dom.getElementsByTagName("node"):
+        if e.hasAttribute("uname") and e.getAttribute("uname") == node_name:
+            return e
+    return None
+
 def dom_get_children_by_tag_name(dom_el, tag_name):
     return [
         node
@@ -1103,140 +1155,6 @@ def dom_attrs_to_list(dom_el, with_id=False):
         attributes.append("(id:%s)" % (dom_el.getAttribute("id")))
     return attributes
 
-# Check if resource is started (or stopped) for 'wait' seconds
-# options for started mode:
-#   count - do not success unless 'count' instances of the resource are Started
-#       or Master (Slave does not count)
-#   allowed_nodes - do not success if resource is running on any other node
-#   banned_nodes - do not success if resource is running on any banned node
-#   desired_nodes - do not success unless resource is running on all desired
-#       nodes
-#   cluster state - use passed cluster state instead of live one
-# options for stopped mode:
-#   desired_nodes - do not success unless resource is stopped on all desired
-#       nodes
-# options for both:
-#   slave_as_started - consider Slave role as started, otherwise only Started
-#       and Master are considered
-def is_resource_started(
-    resource, wait, stopped=False,
-    count=None, allowed_nodes=None, banned_nodes=None, desired_nodes=None,
-    cluster_state=None, slave_as_started=False
-):
-    running_roles = set(("Started", "Master"))
-    if slave_as_started:
-        running_roles.add("Slave")
-    timeout = False
-    fail = False
-    success = False
-    resource_original = resource
-    nodes_running_original = set()
-    set_allowed_nodes = set(allowed_nodes) if allowed_nodes else allowed_nodes
-    set_banned_nodes = set(banned_nodes) if banned_nodes else banned_nodes
-    set_desired_nodes = set(desired_nodes) if desired_nodes else desired_nodes
-    expire_time = time.time() + wait
-    while not fail and not success and not timeout:
-        state = cluster_state if cluster_state else getClusterState()
-        cib_dom = get_cib_dom()
-        node_count = len(cib_dom.getElementsByTagName("node"))
-        resource = get_resource_for_running_check(state, resource, stopped)
-        running_on = resource_running_on(resource_original, state)
-        if not nodes_running_original:
-            nodes_running_original = set(
-                running_on["nodes_started"] + running_on["nodes_master"]
-            )
-            if slave_as_started:
-                nodes_running_original.update(running_on["nodes_slave"])
-        failed_op_list = get_lrm_rsc_op_failed(cib_dom, resource)
-        resources = state.getElementsByTagName("resource")
-        all_stopped = True
-        for res in resources:
-            # If resource is a clone it can have an id of '<resource name>:N'
-            if res.getAttribute("id") == resource or res.getAttribute("id").startswith(resource+":"):
-                list_running_on = (
-                    running_on["nodes_started"] + running_on["nodes_master"]
-                )
-                if slave_as_started:
-                    list_running_on.extend(running_on["nodes_slave"])
-                set_running_on = set(list_running_on)
-                if stopped:
-                    if (
-                        res.getAttribute("role") != "Stopped"
-                        or
-                        (
-                            res.getAttribute("role") == "Stopped"
-                            and
-                            res.getAttribute("failed") == "true"
-                        )
-                    ):
-                        if desired_nodes:
-                            for node in res.getElementsByTagName("node"):
-                                if node.getAttribute("name") in desired_nodes:
-                                    all_stopped = False
-                        else:
-                            all_stopped = False
-                    nodes_failed = set()
-                    for op in failed_op_list:
-                        if op.getAttribute("operation") in ["stop", "demote"]:
-                            nodes_failed.add(op.getAttribute("on_node"))
-                    if nodes_failed >= nodes_running_original:
-                        fail = True
-                else:
-                    if (
-                        res.getAttribute("role") in running_roles
-                        and
-                        res.getAttribute("failed") != "true"
-                        and
-                        (count is None or len(list_running_on) == count)
-                        and
-                        (
-                            not banned_nodes
-                            or
-                            set_running_on.isdisjoint(set_banned_nodes)
-                        )
-                        and
-                        (
-                            not allowed_nodes
-                            or
-                            set_running_on <= set_allowed_nodes
-                        )
-                        and
-                        (
-                            not desired_nodes
-                            or
-                            set_running_on >= set_desired_nodes
-                        )
-                    ):
-                        success = True
-                    # check for failures but give pacemaker a chance to try
-                    # to start the resource on another node (it will try anyway
-                    # so don't report fail prematurely)
-                    nodes_failed = set()
-                    for op in failed_op_list:
-                        if op.getAttribute("operation") in ["start", "promote"]:
-                            nodes_failed.add(op.getAttribute("on_node"))
-                    if (
-                        len(nodes_failed) >= node_count
-                        or
-                        (allowed_nodes and set(allowed_nodes) == nodes_failed)
-                    ):
-                        fail = True
-        if stopped and all_stopped:
-            success = True
-        if (expire_time < time.time()):
-            timeout = True
-        if not timeout:
-            time.sleep(0.25)
-    message = ""
-    if not success and timeout and not failed_op_list:
-        message += "waiting timed out\n"
-    message += running_on["message"]
-    if failed_op_list:
-        failed_op_list.sort(key=lambda x: x.getAttribute("on_node"))
-        message += "\nResource failures:\n  "
-        message += "\n  ".join(get_lrm_rsc_op_failures(failed_op_list))
-    return success, message
-
 def get_resource_for_running_check(cluster_state, resource_id, stopped=False):
     for clone in cluster_state.getElementsByTagName("clone"):
         if clone.getAttribute("id") == resource_id:
@@ -1267,137 +1185,13 @@ def get_resource_for_running_check(cluster_state, resource_id, stopped=False):
             resource_id = elem.getAttribute("id")
     return resource_id
 
-# op_list can be obtained from get_operations_from_transitions
-# it looks like this: [(resource_id, operation, node), ...]
-def wait_for_primitive_ops_to_process(op_list, timeout=None):
-    if timeout:
-        timeout = int(timeout)
-        start_time = time.time()
-    else:
-        cib_dom = get_cib_dom()
-
-    for op in op_list:
-        print "Waiting for '%s' to %s on %s" % (op[0], op[1], op[2])
-        if timeout:
-            remaining_timeout = timeout - (time.time() - start_time)
-        else:
-            remaining_timeout = get_resource_op_timeout(cib_dom, op[0], op[1])
-        # crm_simulate can start resources as slave and promote them later
-        # so we need to consider slave resources as started
-        success, message = is_resource_started(
-            op[0], remaining_timeout, op[1] == "stop",
-            desired_nodes=[op[2]], slave_as_started=(op[1] == "start")
-        )
-        if success:
-            print message
-        else:
-            err(
-                "Unable to %s '%s' on %s\n%s"
-                % (op[1], op[0], op[2], message)
-            )
-
-def get_resource_status_for_wait(dom, resource_el, node_count):
-    res_id = resource_el.getAttribute("id")
-    clone_ms_parent = dom_get_resource_clone_ms_parent(dom, res_id)
-    meta_resource_el = clone_ms_parent if clone_ms_parent else resource_el
-    status_running = is_resource_started(res_id, 0)[0]
-    status_enabled = True
-    for meta in meta_resource_el.getElementsByTagName("meta_attributes"):
-        for nvpair in meta.getElementsByTagName("nvpair"):
-            if nvpair.getAttribute("name") == "target-role":
-                if nvpair.getAttribute("value").lower() == "stopped":
-                    status_enabled = False
-    status_instances = count_expected_resource_instances(
-        meta_resource_el, node_count
-    )
-    return {
-        "running": status_running,
-        "enabled": status_enabled,
-        "instances": status_instances,
-    }
-
-def get_resource_wait_decision(old_status, new_status):
-    wait_for_start = False
-    wait_for_stop = False
-    if old_status["running"] and not new_status["enabled"]:
-        wait_for_stop = True
-    elif (
-        not old_status["running"]
-        and
-        (not old_status["enabled"] and new_status["enabled"])
-    ):
-        wait_for_start = True
-    elif (
-        old_status["running"]
-        and
-        old_status["instances"] != new_status["instances"]
-    ):
-        wait_for_start = True
-    return wait_for_start, wait_for_stop
-
-def get_lrm_rsc_op(cib, resource, op_list=None, last_call_id=None):
-    lrm_rsc_op_list = []
-    for lrm_resource in cib.getElementsByTagName("lrm_resource"):
-        if lrm_resource.getAttribute("id") != resource:
-            continue
-        for lrm_rsc_op in lrm_resource.getElementsByTagName("lrm_rsc_op"):
-            if op_list and lrm_rsc_op.getAttribute("operation") not in op_list:
-                continue
-            if (
-                last_call_id is not None
-                and
-                int(lrm_rsc_op.getAttribute("call-id")) <= int(last_call_id)
-            ):
-                continue
-            if not lrm_rsc_op.getAttribute("on_node"):
-                state = dom_get_parent_by_tag_name(lrm_rsc_op, "node_state")
-                if state:
-                    lrm_rsc_op.setAttribute(
-                        "on_node", state.getAttribute("uname")
-                    )
-            lrm_rsc_op_list.append(lrm_rsc_op)
-    lrm_rsc_op_list.sort(key=lambda x: int(x.getAttribute("call-id")))
-    return lrm_rsc_op_list
-
-def get_lrm_rsc_op_failed(cib, resource, op_list=None, last_call_id=None):
-    failed_op_list = []
-    for op in get_lrm_rsc_op(cib, resource, op_list, last_call_id):
-        if (
-            op.getAttribute("operation") == "monitor"
-            and
-            op.getAttribute("rc-code") == "7"
-        ):
-            continue
-        if op.getAttribute("rc-code") != "0":
-            failed_op_list.append(op)
-    return failed_op_list
-
-def get_lrm_rsc_op_failures(lrm_rsc_op_list):
-    failures = []
-    for rsc_op in lrm_rsc_op_list:
-        if rsc_op.getAttribute("rc-code") == "0":
-            continue
-        reason = rsc_op.getAttribute("exit-reason")
-        if not reason:
-            reason = "failed"
-        node = rsc_op.getAttribute("on_node")
-        if not node:
-            state = dom_get_parent_by_tag_name(rsc_op, "node_state")
-            if state:
-                node = state.getAttribute("uname")
-        if node:
-            failures.append("%s: %s" % (node, reason))
-        else:
-            failures.append(reason)
-    return failures
-
-def resource_running_on(resource, passed_state=None):
+def resource_running_on(resource, passed_state=None, stopped=False):
     nodes_started = []
     nodes_master = []
     nodes_slave = []
     state = passed_state if passed_state else getClusterState()
     resource_original = resource
-    resource = get_resource_for_running_check(state, resource)
+    resource = get_resource_for_running_check(state, resource, stopped)
     resources = state.getElementsByTagName("resource")
     for res in resources:
         # If resource is a clone it can have an id of '<resource name>:N'
@@ -1442,36 +1236,12 @@ def resource_running_on(resource, passed_state=None):
             % (resource_original, "; ".join(message_parts))
     return {
         "message": message,
+        "is_running": bool(nodes_started or nodes_master or nodes_slave),
         "nodes_started": nodes_started,
         "nodes_master": nodes_master,
         "nodes_slave": nodes_slave,
     }
 
-# get count of expected running instances of a resource
-# counts promoted instances for master/slave resource
-def count_expected_resource_instances(res_el, node_count):
-    if res_el.tagName in ["primitive", "group"]:
-        return 1
-    unique = dom_get_meta_attr_value(res_el, "globally-unique") == "true"
-    clone_max = dom_get_meta_attr_value(res_el, "clone-max")
-    clone_max = int(clone_max) if clone_max else node_count
-    clone_node_max = dom_get_meta_attr_value(res_el, "clone-node-max")
-    clone_node_max = int(clone_node_max) if clone_node_max else 1
-    if res_el.tagName == "master":
-        master_max = dom_get_meta_attr_value(res_el, "master-max")
-        master_max = int(master_max) if master_max else 1
-        master_node_max = dom_get_meta_attr_value(res_el, "master-node-max")
-        master_node_max = int(master_node_max) if master_node_max else 1
-        if unique:
-            return min(clone_max, master_max, node_count * clone_node_max)
-        else:
-            return min(clone_max, master_max, node_count)
-    else:
-        if unique:
-            return min(clone_max, node_count * clone_node_max)
-        else:
-            return min(clone_max, node_count)
-
 def does_resource_have_options(ra_type):
     if ra_type.startswith("ocf:") or ra_type.startswith("stonith:") or ra_type.find(':') == -1:
         return True
@@ -1494,7 +1264,7 @@ def get_default_op_values(ra_type):
     return_list = []
     try:
         root = ET.fromstring(metadata)
-        actions = root.findall(".//actions/action")
+        actions = root.findall(str(".//actions/action"))
         for action in actions:
             if action.attrib["name"] in allowable_operations:
                 new_operation = []
@@ -1515,63 +1285,42 @@ def get_default_op_values(ra_type):
 def get_timeout_seconds(timeout, return_unknown=False):
     if timeout.isdigit():
         return int(timeout)
-    if timeout.endswith("s") and timeout[:-1].isdigit():
-        return int(timeout[:-1])
-    if timeout.endswith("min") and timeout[:-3].isdigit():
-        return int(timeout[:-3]) * 60
+    suffix_multiplier = {
+        "s": 1,
+        "sec": 1,
+        "m": 60,
+        "min": 60,
+        "h": 3600,
+        "hr": 3600,
+    }
+    for suffix, multiplier in suffix_multiplier.items():
+        if timeout.endswith(suffix) and timeout[:-len(suffix)].isdigit():
+            return int(timeout[:-len(suffix)]) * multiplier
     return timeout if return_unknown else None
 
-def get_default_op_timeout():
-    output, retVal = run([
-        "crm_attribute", "--type", "op_defaults", "--name", "timeout",
-        "--query", "--quiet"
-    ])
-    if retVal == 0 and output.strip():
-        timeout = get_timeout_seconds(output)
-        if timeout is not None:
-            return timeout
-
-    properties = prop.get_set_properties(defaults=prop.get_default_properties())
-    if properties["default-action-timeout"]:
-        timeout = get_timeout_seconds(properties["default-action-timeout"])
-        if timeout is not None:
-            return timeout
+def check_pacemaker_supports_resource_wait():
+    output, retval = run(["crm_resource", "-?"])
+    if "--wait" not in output:
+        err("crm_resource does not support --wait, please upgrade pacemaker")
 
-    return settings.default_wait
-
-def get_resource_op_timeout(cib_dom, resource, operation):
-    resource_el = dom_get_resource(cib_dom, resource)
-    if resource_el:
-        for op_el in resource_el.getElementsByTagName("op"):
-            if op_el.getAttribute("name") == operation:
-                timeout = get_timeout_seconds(op_el.getAttribute("timeout"))
-                if timeout is not None:
-                    return timeout
-
-        defaults = get_default_op_values(
-            "%s:%s:%s"
-            % (
-                resource_el.getAttribute("class"),
-                resource_el.getAttribute("provider"),
-                resource_el.getAttribute("type"),
-            )
+def validate_wait_get_timeout():
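+    # make sure --wait is usable and convert its value to seconds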
+    check_pacemaker_supports_resource_wait()
+    if usefile:
+        err("Cannot use '-f' together with '--wait'")
+    wait_timeout = pcs_options["--wait"]
+    if wait_timeout is None:
+        return wait_timeout
+    wait_timeout = get_timeout_seconds(wait_timeout)
+    if wait_timeout is None:
+        err(
+            "%s is not a valid number of seconds to wait"
+            % pcs_options["--wait"]
         )
-        for op in defaults:
-            if op[0] == operation:
-                for op_setting in op[1:]:
-                    match = re.match("timeout=(.+)", op_setting)
-                    if match:
-                        timeout = get_timeout_seconds(match.group(1))
-                        if timeout is not None:
-                            return timeout
-
-    return get_default_op_timeout()
+    return wait_timeout
 
 # Check and see if the specified resource (or stonith) type is present on the
 # file system and properly responds to a meta-data request
 def is_valid_resource(resource, caseInsensitiveCheck=False):
-    found_resource = False
-    stonith_resource = False
     if resource.startswith("ocf:"):
         resource_split = resource.split(":",3)
         if len(resource_split) != 3:
@@ -1579,9 +1328,20 @@ def is_valid_resource(resource, caseInsensitiveCheck=False):
         providers = [resource_split[1]]
         resource = resource_split[2]
     elif resource.startswith("stonith:"):
-        stonith_resource = True
         resource_split = resource.split(":", 2)
         stonith = resource_split[1]
+        metadata = get_stonith_metadata("/usr/sbin/" + stonith)
+        if metadata != False:
+            return True
+        else:
+            return False
+    elif resource.startswith("nagios:"):
+        # search for nagios script
+        resource_split = resource.split(":", 2)
+        if os.path.isfile("/usr/share/pacemaker/nagios/plugins-metadata/%s.xml" % resource_split[1]):
+            return True
+        else:
+            return False
     elif resource.startswith("lsb:"):
         resource_split = resource.split(":",2)
         lsb_ra = resource_split[1]
@@ -1599,29 +1359,25 @@ def is_valid_resource(resource, caseInsensitiveCheck=False):
     else:
         providers = sorted(os.listdir("/usr/lib/ocf/resource.d"))
 
-    if stonith_resource:
-        metadata = get_stonith_metadata("/usr/sbin/" + stonith)
-        if metadata != False:
-            found_resource = True
-    else:
-        for provider in providers:
-            filepath = "/usr/lib/ocf/resource.d/" + provider + "/"
-            if caseInsensitiveCheck:
-                if os.path.isdir(filepath):
-                    all_files = [ f for f in os.listdir(filepath ) ]
-                    for f in all_files:
-                        if f.lower() == resource.lower() and os.path.isfile(filepath + f):
-                            return "ocf:" + provider + ":" + f
-                    continue
-
-            metadata = get_metadata(filepath + resource)
-            if metadata == False:
+    # search for ocf script
+    for provider in providers:
+        filepath = "/usr/lib/ocf/resource.d/" + provider + "/"
+        if caseInsensitiveCheck:
+            if os.path.isdir(filepath):
+                all_files = [ f for f in os.listdir(filepath ) ]
+                for f in all_files:
+                    if f.lower() == resource.lower() and os.path.isfile(filepath + f):
+                        return "ocf:" + provider + ":" + f
                 continue
-            else:
-                found_resource = True
-                break
 
-    return found_resource
+        metadata = get_metadata(filepath + resource)
+        if metadata == False:
+            continue
+        else:
+            # found it
+            return True
+
+    return False
 
 # Get metadata from resource agent
 def get_metadata(resource_agent_script):
@@ -1648,11 +1404,11 @@ def get_default_stonith_options():
     (metadata, retval) = run([settings.stonithd_binary, "metadata"],True)
     if retval == 0:
         root = ET.fromstring(metadata)
-        params = root.findall(".//parameter")
+        params = root.findall(str(".//parameter"))
         default_params = []
         for param in params:
             adv_param = False
-            for short_desc in param.findall(".//shortdesc"):
+            for short_desc in param.findall(str(".//shortdesc")):
                 if short_desc.text.startswith("Advanced use only"):
                     adv_param = True
             if adv_param == False:
@@ -1695,9 +1451,21 @@ def get_cib_etree():
     except:
         err("unable to get cib")
 
+def is_etree(var):
+    return (
+        var.__class__ == xml.etree.ElementTree.Element
+        or
+        (
+            # in python3 _ElementInterface does not exist
+            hasattr(xml.etree.ElementTree, "_ElementInterface")
+            and
+            var.__class__ == xml.etree.ElementTree._ElementInterface
+        )
+    )
+
 # Replace only configuration section of cib with dom passed
 def replace_cib_configuration(dom):
-    if dom.__class__ == xml.etree.ElementTree.Element or dom.__class__ == xml.etree.ElementTree._ElementInterface:
+    if is_etree(dom):
         new_dom = ET.tostring(dom)
     else:
         new_dom = dom.toxml()
@@ -1713,8 +1481,8 @@ def is_valid_cib_scope(scope):
 
 # Checks to see if id exists in the xml dom passed
 def does_id_exist(dom, check_id):
-    if dom.__class__ == xml.etree.ElementTree.Element or dom.__class__ == xml.etree.ElementTree._ElementInterface:
-        for elem in dom.findall(".//*"):
+    if is_etree(dom):
+        for elem in dom.findall(str(".//*")):
             if elem.get("id") == check_id:
                 return True
     else:
@@ -1738,6 +1506,7 @@ def find_unique_id(dom, check_id):
 # operations
 # pacemaker differentiates between operations only by name and interval
 def operation_exists(operations_el, op_el):
+    existing = []
     op_name = op_el.getAttribute("name")
     op_interval = get_timeout_seconds(op_el.getAttribute("interval"), True)
     for op in operations_el.getElementsByTagName("op"):
@@ -1746,7 +1515,34 @@ def operation_exists(operations_el, op_el):
             and
             get_timeout_seconds(op.getAttribute("interval"), True) == op_interval
         ):
-            return op
+            existing.append(op)
+    return existing
+
+def operation_exists_by_name(operations_el, op_el):
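+    # return a list of operations matching op_el by name; monitor operations
+    # must also match role and OCF_CHECK_LEVEL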
+    existing = []
+    op_name = op_el.getAttribute("name")
+    op_role = op_el.getAttribute("role") or "Started"
+    ocf_check_level = None
+    if "monitor" == op_name:
+        ocf_check_level = get_operation_ocf_check_level(op_el)
+
+    for op in operations_el.getElementsByTagName("op"):
+        if op.getAttribute("name") == op_name:
+            if op_name != "monitor":
+                existing.append(op)
+            elif (
+                (op.getAttribute("role") or "Started") == op_role
+                and
+                ocf_check_level == get_operation_ocf_check_level(op)
+            ):
+                existing.append(op)
+    return existing
+
+def get_operation_ocf_check_level(operation_el):
+    for attr_el in operation_el.getElementsByTagName("instance_attributes"):
+        for nvpair_el in attr_el.getElementsByTagName("nvpair"):
+            if nvpair_el.getAttribute("name") == "OCF_CHECK_LEVEL":
+                return nvpair_el.getAttribute("value")
     return None
 
 def set_unmanaged(resource):
@@ -1799,17 +1595,23 @@ def get_node_attributes():
     dom = parseString(node_config).documentElement
     for node in dom.getElementsByTagName("node"):
         nodename = node.getAttribute("uname")
-        for nvp in node.getElementsByTagName("nvpair"):
-            if nodename not in nas:
-                nas[nodename] = []
-            nas[nodename].append(nvp.getAttribute("name") + "=" + nvp.getAttribute("value"))
+        for attributes in node.getElementsByTagName("instance_attributes"):
+            for nvp in attributes.getElementsByTagName("nvpair"):
+                if nodename not in nas:
+                    nas[nodename] = []
+                nas[nodename].append(nvp.getAttribute("name") + "=" + nvp.getAttribute("value"))
+            break
     return nas
 
 def set_node_attribute(prop, value, node):
     if (value == ""):
         o,r = run(["crm_attribute", "-t", "nodes", "--node", node, "--name",prop,"--query"])
         if r != 0 and "--force" not in pcs_options:
-            err("attribute: '%s' doesn't exist for node: '%s'" % (prop,node))
+            err(
+                "attribute: '%s' doesn't exist for node: '%s'" % (prop, node),
+                False
+            )
+            sys.exit(2)
         o,r = run(["crm_attribute", "-t", "nodes", "--node", node, "--name",prop,"--delete"])
     else:
         o,r = run(["crm_attribute", "-t", "nodes", "--node", node, "--name",prop,"--update",value])
@@ -1863,27 +1665,44 @@ def setAttribute(a_type, a_name, a_value):
 
     output, retval = run(args)
     if retval != 0:
-        print output
+        print(output)
 
 def getTerminalSize(fd=1):
     """
     Returns height and width of current terminal. First tries to get
     size via termios.TIOCGWINSZ, then from environment. Defaults to 25
     lines x 80 columns if both methods fail.
- 
+
     :param fd: file descriptor (default: 1=stdout)
     """
     try:
         import fcntl, termios, struct
-        hw = struct.unpack('hh', fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234'))
+        hw = struct.unpack(
+            str('hh'),
+            fcntl.ioctl(fd, termios.TIOCGWINSZ, '1234')
+        )
     except:
         try:
             hw = (os.environ['LINES'], os.environ['COLUMNS'])
-        except:  
+        except:
             hw = (25, 80)
- 
     return hw
 
+def get_terminal_input(message=None):
+    if message:
+        sys.stdout.write(message)
+        sys.stdout.flush()
+    if PYTHON2:
+        return raw_input("")
+    else:
+        return input("")
+
+def get_terminal_password(message="Password: "):
+    if sys.stdout.isatty():
+        return getpass.getpass(message)
+    else:
+        return get_terminal_input(message)
+
 # Returns an xml dom containing the current status of the cluster
 def getClusterState():
     (output, retval) = run(["crm_mon", "-1", "-X","-r"])
@@ -1903,18 +1722,18 @@ def stonithCheck():
     et = get_cib_etree()
     cps = et.find("configuration/crm_config/cluster_property_set")
     if cps != None:
-        for prop in cps.findall("nvpair"):
+        for prop in cps.findall(str("nvpair")):
             if 'name' in prop.attrib and prop.attrib["name"] == "stonith-enabled":
                 if prop.attrib["value"] == "off" or \
                         prop.attrib["value"] == "false":
                     return False
         
-    primitives = et.findall("configuration/resources/primitive")
+    primitives = et.findall(str("configuration/resources/primitive"))
     for p in primitives:
         if p.attrib["class"] == "stonith":
             return False
 
-    primitives = et.findall("configuration/resources/clone/primitive")
+    primitives = et.findall(str("configuration/resources/clone/primitive"))
     for p in primitives:
         if p.attrib["class"] == "stonith":
             return False
@@ -1922,27 +1741,37 @@ def stonithCheck():
     return True
 
 def getCorosyncNodesID(allow_failure=False):
-    if is_rhel6():
-        output, retval = run(["cman_tool", "nodes", "-F", "id,name"])
-        if retval != 0:
-            if allow_failure:
-                return {}
-            else:
-                err("unable to get list of corosync nodes")
-        nodeid_re = re.compile(r"^(.)\s+([^\s]+)\s*$", re.M)
-        return dict([
-            (node_id, node_name)
-            for node_id, node_name in nodeid_re.findall(output)
-        ])
+    if os.getuid() == 0:
+        if is_rhel6():
+            output, retval = run(["cman_tool", "nodes", "-F", "id,name"])
+            if retval != 0:
+                if allow_failure:
+                    return {}
+                else:
+                    err("unable to get list of corosync nodes")
+            nodeid_re = re.compile(r"^(.)\s+([^\s]+)\s*$", re.M)
+            return dict([
+                (node_id, node_name)
+                for node_id, node_name in nodeid_re.findall(output)
+            ])
+
+        (output, retval) = run(['corosync-cmapctl', '-b', 'nodelist.node'])
+    else:
+        err_msgs, retval, output, std_err = call_local_pcsd(
+            ['status', 'nodes', 'corosync-id'], True
+        )
+        if err_msgs:
+            for msg in err_msgs:
+                err(msg, False)
+            sys.exit(1)
 
-    cs_nodes = {}
-    (output, retval) = run(['corosync-cmapctl', '-b', 'nodelist.node'])
     if retval != 0:
         if allow_failure:
             return {}
         else:
             err("unable to get list of corosync nodes")
 
+    cs_nodes = {}
     node_list_node_mapping = {}
     for line in output.rstrip().split("\n"):
         m = re.match("nodelist.node.(\d+).nodeid.*= (.*)",line)
@@ -1956,8 +1785,19 @@ def getCorosyncNodesID(allow_failure=False):
     return cs_nodes
 
 # Warning, if a node has never started the hostname may be '(null)'
+#TODO This doesn't work on CMAN clusters at all and should be removed completely
 def getPacemakerNodesID(allow_failure=False):
-    (output, retval) = run(['crm_node', '-l'])
+    if os.getuid() == 0:
+        (output, retval) = run(['crm_node', '-l'])
+    else:
+        err_msgs, retval, output, std_err = call_local_pcsd(
+            ['status', 'nodes', 'pacemaker-id'], True
+        )
+        if err_msgs:
+            for msg in err_msgs:
+                err(msg, False)
+            sys.exit(1)
+
     if retval != 0:
         if allow_failure:
             return {}
@@ -1966,12 +1806,14 @@ def getPacemakerNodesID(allow_failure=False):
 
     pm_nodes = {}
     for line in output.rstrip().split("\n"):
-        node_info = line.rstrip().split(" ",1)
-        pm_nodes[node_info[0]] = node_info[1]
+        node_info = line.rstrip().split(" ")
+        if len(node_info) <= 2 or node_info[2] != "lost":
+            pm_nodes[node_info[0]] = node_info[1]
 
     return pm_nodes
 
 def corosyncPacemakerNodeCheck():
+    # does not work on CMAN clusters
     pm_nodes = getPacemakerNodesID()
     cs_nodes = getCorosyncNodesID()
 
@@ -2026,7 +1868,7 @@ def validInstanceAttributes(res_id, ra_values, resource_type):
     bad_parameters = []
     try:
         actions = ET.fromstring(metadata).find("parameters")
-        for action in actions.findall("parameter"):
+        for action in actions.findall(str("parameter")):
             valid_parameters.append(action.attrib["name"])
             if "required" in action.attrib and action.attrib["required"] == "1":
 # If a default value is set, then the attribute isn't really required (for 'action' on stonith devices only)
@@ -2051,42 +1893,19 @@ def validInstanceAttributes(res_id, ra_values, resource_type):
 
     if missing_required_parameters:
         if resClass == "stonith" and "port" in missing_required_parameters:
-            if (
-                "pcmk_host_argument" in ra_values
-                or
-                "pcmk_host_map" in ra_values
-                or
-                "pcmk_host_list" in ra_values
-            ):
-                missing_required_parameters.remove("port")
+            # Temporarily make "port" an optional parameter. Once we are
+            # getting metadata from pacemaker, this will be reviewed and fixed.
+            #if (
+            #    "pcmk_host_argument" in ra_values
+            #    or
+            #    "pcmk_host_map" in ra_values
+            #    or
+            #    "pcmk_host_list" in ra_values
+            #):
+            missing_required_parameters.remove("port")
 
     return bad_parameters, missing_required_parameters 
 
-def generate_rrp_corosync_config(interface):
-    interface = str(interface)
-    if interface == "0":
-        mcastaddr = "239.255.1.1"
-    else:
-        mcastaddr = "239.255.2.1"
-    mcastport = "5405"
-
-    ir = "  interface {\n"
-    ir += "    ringnumber: %s\n" % interface
-    ir += "    bindnetaddr: " + pcs_options["--addr"+interface] + "\n"
-    if "--broadcast" + interface in pcs_options:
-        ir += "    broadcast: yes\n"
-    else:
-        if "--mcast" + interface in pcs_options:
-            mcastaddr = pcs_options["--mcast"+interface]
-        ir += "    mcastaddr: " + mcastaddr + "\n"
-        if "--mcastport"+interface in pcs_options:
-            mcastport = pcs_options["--mcastport"+interface]
-        ir += "    mcastport: " + mcastport + "\n"
-        if "--ttl" + interface in pcs_options:
-            ir += "    ttl: " + pcs_options["--ttl"+interface] + "\n"
-    ir += "  }\n"
-    return ir
-
 def getClusterName():
     if is_rhel6():
         try:
@@ -2098,15 +1917,18 @@ def getClusterName():
     else:
         try:
             f = open(settings.corosync_conf_file,'r')
-        except IOError as e:
+            conf = corosync_conf_utils.parse_string(f.read())
+            f.close()
+            # mimic corosync behavior - the last cluster_name found is used
+            cluster_name = None
+            for totem in conf.get_sections("totem"):
+                for attrs in totem.get_attributes("cluster_name"):
+                    cluster_name = attrs[1]
+            if cluster_name:
+                return cluster_name
+        except (IOError, corosync_conf_utils.CorosyncConfException) as e:
             return ""
 
-        p = re.compile('cluster_name: *(.*)')
-        for line in f:
-            m = p.match(line)
-            if m:
-                return m.group(1)
-
     return ""
 
 def write_empty_cib(cibfile):
@@ -2164,31 +1986,64 @@ def is_iso8601_date(var):
     output, retVal = run(["iso8601", "-d", var])
     return retVal == 0
 
+def verify_cert_key_pair(cert, key):
+    errors = []
+    cert_modulus = ""
+    key_modulus = ""
+
+    output, retval = run(
+        ["/usr/bin/openssl", "x509", "-modulus", "-noout"],
+        string_for_stdin=cert
+    )
+    if retval != 0:
+        errors.append("Invalid certificate: {0}".format(output.strip()))
+    else:
+        cert_modulus = output.strip()
+
+    output, retval = run(
+        ["/usr/bin/openssl", "rsa", "-modulus", "-noout"],
+        string_for_stdin=key
+    )
+    if retval != 0:
+        errors.append("Invalid key: {0}".format(output.strip()))
+    else:
+        key_modulus = output.strip()
+
+    if not errors and cert_modulus and key_modulus:
+        if cert_modulus != key_modulus:
+            errors.append("Certificate does not match the key")
+
+    return errors
+
 # Does pacemaker consider a variable as true in cib?
 # See crm_is_true in pacemaker/lib/common/utils.c
 def is_cib_true(var):
     return var.lower() in ("true", "on", "yes", "y", "1")
 
 def is_systemctl():
-    if os.path.exists('/usr/bin/systemctl'):
-        return True
-    else:
-        return False
+    systemctl_paths = [
+        '/usr/bin/systemctl',
+        '/bin/systemctl',
+        '/var/run/systemd/system',
+    ]
+    for path in systemctl_paths:
+        if os.path.exists(path):
+            return True
+    return False
 
+@simple_cache
 def is_rhel6():
-    try:
-        issue = open('/etc/system-release').read()
-    except IOError as e:
-        return False
-
-# Since there are so many RHEL 6 variants, this check looks for the first
-# number in /etc/system-release followed by a period and number, and if it's 6.N,
-# it returns true.
-    match = re.search(r'(\d)\.\d', issue)
-    if match and match.group(1) == "6":
-        return True
-    else:
+    # Checking corosync version works in most cases and supports non-rhel
+    # distributions as well as running (manually compiled) corosync2 on rhel6.
+    # - corosync2 does not support cman at all
+    # - corosync1 runs with cman on rhel6
+    # - corosync1 can be used without cman, but we don't support it anyways
+    # - corosync2 is the default result if errors occur
+    output, retval = run(["corosync", "-v"])
+    if retval != 0:
         return False
+    match = re.search(r"version\D+(\d+)", output)
+    return match and match.group(1) == "1"
 
 def err(errorText, exit_after_error=True):
     sys.stderr.write("Error: %s\n" % errorText)
@@ -2197,14 +2052,14 @@ def err(errorText, exit_after_error=True):
 
 def serviceStatus(prefix):
     if is_systemctl():
-        print "Daemon Status:"
+        print("Daemon Status:")
         daemons = ["corosync", "pacemaker", "pcsd"]
         out, ret = run(["systemctl", "is-active"] + daemons)
         status = out.split("\n")
         out, ret = run(["systemctl", "is-enabled"]+ daemons)
         enabled = out.split("\n")
         for i in range(len(daemons)):
-            print prefix + daemons[i] + ": " + status[i] + "/" + enabled[i]
+            print(prefix + daemons[i] + ": " + status[i] + "/" + enabled[i])
 
 def enableServices():
     if is_rhel6():
@@ -2230,7 +2085,7 @@ def disableServices():
             run(["chkconfig", "corosync", "off"])
             run(["chkconfig", "pacemaker", "off"])
 
-def write_file(path, data):
+def write_file(path, data, permissions=0o644, binary=False):
     if os.path.exists(path):
         if not "--force" in pcs_options:
             return False, "'%s' already exists, use --force to overwrite" % path
@@ -2239,8 +2094,9 @@ def write_file(path, data):
                 os.remove(path)
             except EnvironmentError as e:
                 return False, "unable to remove '%s': %s" % (path, e)
+    mode = "wb" if binary else "w"
     try:
-        with open(path, "w") as outfile:
+        with os.fdopen(os.open(path, os.O_WRONLY | os.O_CREAT, permissions), mode) as outfile:
             outfile.write(data)
     except EnvironmentError as e:
         return False, "unable to write to '%s': %s" % (path, e)
@@ -2264,13 +2120,13 @@ def tar_add_file_data(
         info.uname = uname
     if gname is not None:
         info.gname = gname
-    data_io = cStringIO.StringIO(data)
+    data_io = BytesIO(data)
     tarball.addfile(info, data_io)
     data_io.close()
 
 def simulate_cib(cib_dom):
-    new_cib_file = tempfile.NamedTemporaryFile("w+b", -1, ".pcs")
-    transitions_file = tempfile.NamedTemporaryFile("w+b", -1, ".pcs")
+    new_cib_file = tempfile.NamedTemporaryFile(mode="w+", suffix=".pcs")
+    transitions_file = tempfile.NamedTemporaryFile(mode="w+", suffix=".pcs")
     output, retval = run(
         ["crm_simulate", "--simulate", "--save-output", new_cib_file.name,
             "--save-graph", transitions_file.name, "--xml-pipe"],
@@ -2293,7 +2149,9 @@ def simulate_cib(cib_dom):
 
 def get_operations_from_transitions(transitions_dom):
     operation_list = []
-    watched_operations = ("start", "stop", "promote")
+    watched_operations = (
+        "start", "stop", "promote", "demote", "migrate_from", "migrate_to"
+    )
     for rsc_op in transitions_dom.getElementsByTagName("rsc_op"):
         primitives = rsc_op.getElementsByTagName("primitive")
         if not primitives:
@@ -2301,18 +2159,55 @@ def get_operations_from_transitions(transitions_dom):
         if rsc_op.getAttribute("operation").lower() not in watched_operations:
             continue
         for prim in primitives:
+            prim_id = prim.getAttribute("id")
             operation_list.append((
                 int(rsc_op.getAttribute("id")),
-                (
-                prim.getAttribute("id"),
-                rsc_op.getAttribute("operation").lower(),
-                rsc_op.getAttribute("on_node"),
-                )
+                {
+                    "id": prim_id,
+                    "long_id": prim.getAttribute("long-id") or prim_id,
+                    "operation": rsc_op.getAttribute("operation").lower(),
+                    "on_node": rsc_op.getAttribute("on_node"),
+                }
             ))
     operation_list.sort(key=lambda x: x[0])
     op_list = [op[1] for op in operation_list]
     return op_list
 
+def get_resources_location_from_operations(cib_dom, resources_operations):
+    locations = {}
+    for res_op in resources_operations:
+        operation = res_op["operation"]
+        if operation not in ("start", "promote", "migrate_from"):
+            continue
+        long_id = res_op["long_id"]
+        if long_id not in locations:
+            # Move clone instances as if they were non-cloned resources, it
+            # really works with current pacemaker (1.1.13-6). Otherwise there
+            # is probably no way to move them other then setting their
+            # stickiness to 0.
+            res_id = res_op["id"]
+            if ":" in res_id:
+                res_id = res_id.split(":")[0]
+            id_for_constraint = validate_constraint_resource(
+                cib_dom, res_id
+            )[2]
+            if not id_for_constraint:
+                continue
+            locations[long_id] = {
+                "id": res_op["id"],
+                "long_id": long_id,
+                "id_for_constraint": id_for_constraint,
+            }
+        if operation in ("start", "migrate_from"):
+            locations[long_id]["start_on_node"] = res_op["on_node"]
+        if operation == "promote":
+            locations[long_id]["promote_on_node"] = res_op["on_node"]
+    locations_clean = dict([
+        (key, val) for key, val in locations.items()
+        if "start_on_node" in val or "promote_on_node" in val
+    ])
+    return locations_clean
+
 def get_remote_quorumtool_output(node):
     return sendHTTPRequest(node, "remote/get_quorum_info", None, False, False)
 
@@ -2366,7 +2261,7 @@ def parse_cman_quorum_info(cman_info):
                     continue
                 if not ":" in line:
                     continue
-                parts = map(lambda x: x.strip(), line.split(":", 1))
+                parts = [x.strip() for x in line.split(":", 1)]
                 if parts[0] == "Quorum":
                     parsed["quorate"] = "Activity blocked" not in parts[1]
                     match = re.match("(\d+).*", parts[1])
@@ -2408,7 +2303,7 @@ def parse_quorumtool_output(quorumtool_output):
                     continue
                 if not ":" in line:
                     continue
-                parts = map(lambda x: x.strip(), line.split(":", 1))
+                parts = [x.strip() for x in line.split(":", 1)]
                 if parts[0] == "Quorate":
                     parsed["quorate"] = parts[1].lower() == "yes"
                 elif parts[0] == "Quorum":
@@ -2439,3 +2334,106 @@ def is_node_stop_cause_quorum_loss(quorum_info, local=True, node_list=None):
         votes_after_stop += node_info["votes"]
     return votes_after_stop < quorum_info["quorum"]
 
+def dom_prepare_child_element(dom_element, tag_name, id_prefix=""):
+    dom = dom_element.ownerDocument
+    child_elements = []
+    for child in dom_element.childNodes:
+        if child.nodeType == child.ELEMENT_NODE and child.tagName == tag_name:
+            child_elements.append(child)
+
+    if len(child_elements) == 0:
+        child_element = dom.createElement(tag_name)
+        child_element.setAttribute(
+            "id", id_prefix + tag_name
+        )
+        dom_element.appendChild(child_element)
+    else:
+        child_element = child_elements[0]
+    return child_element
+
+def dom_update_nv_pair(dom_element, name, value, id_prefix=""):
+    dom = dom_element.ownerDocument
+    element_found = False
+    for el in dom_element.getElementsByTagName("nvpair"):
+        if el.getAttribute("name") == name:
+            element_found = True
+            if value == "":
+                dom_element.removeChild(el)
+            else:
+                el.setAttribute("value", value)
+            break
+    if not element_found and value != "":
+        el = dom.createElement("nvpair")
+        el.setAttribute("id", id_prefix + name)
+        el.setAttribute("name", name)
+        el.setAttribute("value", value)
+        dom_element.appendChild(el)
+    return dom_element
+
+# Passed an array of strings ["a=b","c=d"], return array of tuples
+# [("a","b"),("c","d")]
+def convert_args_to_tuples(ra_values):
+    ret = []
+    for ra_val in ra_values:
+        if ra_val.count("=") != 0:
+            split_val = ra_val.split("=", 1)
+            ret.append((split_val[0],split_val[1]))
+    return ret
+
+def is_int(val):
+    try:
+        int(val)
+        return True
+    except ValueError:
+        return False
+
+def dom_update_utilization(dom_element, attributes, id_prefix=""):
+    utilization = dom_prepare_child_element(
+        dom_element,
+        "utilization",
+        id_prefix + dom_element.getAttribute("id") + "-"
+    )
+
+    for name, value in attributes:
+        if value != "" and not is_int(value):
+            err(
+                "Value of utilization attribute must be integer: "
+                "'{0}={1}'".format(name, value)
+            )
+        dom_update_nv_pair(
+            utilization,
+            name,
+            value.strip(),
+            utilization.getAttribute("id") + "-"
+        )
+
+def dom_update_meta_attr(dom_element, attributes):
+    meta_attributes = dom_prepare_child_element(
+        dom_element, "meta_attributes", dom_element.getAttribute("id") + "-"
+    )
+
+    for name, value in attributes:
+        dom_update_nv_pair(
+            meta_attributes,
+            name,
+            value,
+            meta_attributes.getAttribute("id") + "-"
+        )
+
+def get_utilization(element):
+    utilization = {}
+    for e in element.getElementsByTagName("utilization"):
+        for u in e.getElementsByTagName("nvpair"):
+            name = u.getAttribute("name")
+            value = u.getAttribute("value") if u.hasAttribute("value") else ""
+            utilization[name] = value
+        # Use just the first element of utilization attributes. We don't
+        # support utilization with rules just yet.
+        break
+    return utilization
+
+def get_utilization_str(element):
+    output = []
+    for name, value in sorted(get_utilization(element).items()):
+        output.append(name + "=" + value)
+    return " ".join(output)
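
For readers skimming the utilization helpers added above: a minimal standalone
sketch of the "name=value" convention they implement (plain Python, nothing
here is imported from pcs; the names merely mirror convert_args_to_tuples and
get_utilization_str):

    # hypothetical standalone sketch, not part of the pcs sources
    def convert_args_to_tuples(ra_values):
        # "a=b" -> ("a", "b"); items without "=" are silently dropped
        return [tuple(v.split("=", 1)) for v in ra_values if "=" in v]

    def format_utilization(pairs):
        # sorted "name=value" words joined by spaces, like get_utilization_str
        return " ".join(name + "=" + value for name, value in sorted(pairs))

    pairs = convert_args_to_tuples(["cpu=4", "memory=2048", "bogus"])
    print(format_utilization(pairs))  # -> cpu=4 memory=2048
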
diff --git a/pcsd/Gemfile b/pcsd/Gemfile
index 1e7b6ce..fb97a1a 100644
--- a/pcsd/Gemfile
+++ b/pcsd/Gemfile
@@ -1,8 +1,11 @@
 source 'https://rubygems.org'
 
+source 'https://tojeline.fedorapeople.org/rubygems/' do
+    gem 'rpam-ruby19-feist', :platform => :ruby_18
+end
+
 gem 'sinatra'
 gem 'sinatra-contrib'
-gem 'highline'
 gem 'rack'
 gem 'rack-protection'
 gem 'tilt'
@@ -11,6 +14,8 @@ gem 'rack-test'
 gem 'backports'
 gem 'sinatra-sugar'
 gem 'monkey-lib'
-gem 'rpam-ruby19'
+gem 'rpam-ruby19', :platform => [:ruby_19, :ruby_20, :ruby_21, :ruby_22]
+gem 'json'
 gem 'multi_json'
 gem 'open4'
+gem 'orderedhash'
diff --git a/pcsd/Gemfile.lock b/pcsd/Gemfile.lock
index 06db6d6..3140d6f 100644
--- a/pcsd/Gemfile.lock
+++ b/pcsd/Gemfile.lock
@@ -1,30 +1,32 @@
 GEM
   remote: https://rubygems.org/
+  remote: https://tojeline.fedorapeople.org/rubygems/
   specs:
     backports (3.6.4)
-    eventmachine (1.0.3)
-    highline (1.6.20)
+    eventmachine (1.0.7)
+    json (1.8.3)
     monkey-lib (0.5.4)
       backports
-    multi_json (1.10.1)
+    multi_json (1.11.1)
     open4 (1.3.4)
-    rack (1.5.2)
+    orderedhash (0.0.6)
+    rack (1.6.4)
     rack-protection (1.5.3)
       rack
-    rack-test (0.6.2)
+    rack-test (0.6.3)
       rack (>= 1.0)
     rpam-ruby19 (1.2.1)
-    sinatra (1.4.5)
+    sinatra (1.4.6)
       rack (~> 1.4)
       rack-protection (~> 1.4)
-      tilt (~> 1.3, >= 1.3.4)
-    sinatra-contrib (1.4.2)
+      tilt (>= 1.3, < 3)
+    sinatra-contrib (1.4.4)
       backports (>= 2.0)
       multi_json
       rack-protection
       rack-test
       sinatra (~> 1.4.0)
-      tilt (~> 1.3)
+      tilt (>= 1.3, < 3)
     sinatra-sugar (0.5.1)
       monkey-lib (~> 0.5.0)
       sinatra (~> 1.0)
@@ -36,10 +38,11 @@ PLATFORMS
 DEPENDENCIES
   backports
   eventmachine
-  highline
+  json
   monkey-lib
   multi_json
   open4
+  orderedhash
   rack
   rack-protection
   rack-test
diff --git a/pcsd/Makefile b/pcsd/Makefile
index 325f54a..e18d2df 100644
--- a/pcsd/Makefile
+++ b/pcsd/Makefile
@@ -1,19 +1,14 @@
-REL_INFO := $(shell grep -q -i "release 6" /etc/redhat-release ; echo $$?)
-
-ifeq (${REL_INFO},1)
-  build_gems: build_gems_normal
-else
-  build_gems: build_gems_rhel6
-endif
-
-build_gems_normal:
+build_gems: get_gems
 	bundle install --local --deployment
 
 # RHEL6 needs special rpam-ruby19 gem to work with 1.8.7
+# also bundler is not available on RHEL6 in rpm
 build_gems_rhel6:
 	mkdir -p vendor/bundle/ruby
-	gem install --no-rdoc --no-ri -l -i vendor/bundle/ruby vendor/cache/rack-1.5.2.gem vendor/cache/open4-1.3.4.gem vendor/cache/highline-1.6.20.gem vendor/cache/rack-test-0.6.2.gem vendor/cache/backports-3.6.4.gem vendor/cache/multi_json-1.10.1.gem vendor/cache/tilt-1.4.1.gem vendor/cache/rack-protection-1.5.3.gem vendor/cache/sinatra-1.4.5.gem vendor/cache/rhel6-only/json-1.8.1.gem vendor/cache/rhel6-only/rpam-ruby19-feist-1.2.1.1.gem vendor/cache/rhel6-only/json-1.8.1.gem vendor/cache/mo [...]
+	gem install --verbose --no-rdoc --no-ri -l -i vendor/bundle/ruby vendor/cache/backports-3.6.4.gem vendor/cache/eventmachine-1.0.7.gem vendor/cache/json-1.8.3.gem vendor/cache/monkey-lib-0.5.4.gem vendor/cache/multi_json-1.11.1.gem vendor/cache/open4-1.3.4.gem vendor/cache/orderedhash-0.0.6.gem vendor/cache/rack-1.6.4.gem vendor/cache/rack-protection-1.5.3.gem vendor/cache/rack-test-0.6.3.gem vendor/cache/rpam-ruby19-feist-1.2.1.1.gem vendor/cache/tilt-1.4.1.gem vendor/cache/sinatra-1.4. [...]
 
 get_gems:
 	bundle package
 
+clean:
+	rm -rfv vendor/
diff --git a/pcsd/auth.rb b/pcsd/auth.rb
index 8953d60..f5e7d38 100644
--- a/pcsd/auth.rb
+++ b/pcsd/auth.rb
@@ -1,7 +1,7 @@
 require 'json'
-require 'pp'
 require 'securerandom'
 require 'rpam'
+require 'base64'
 
 class PCSAuth
   # Ruby 1.8.7 doesn't implement SecureRandom.uuid
@@ -16,38 +16,26 @@ class PCSAuth
     end
   end
 
-  def self.validUser(username, password, generate_token = false, request = nil)
+  def self.validUser(username, password, generate_token = false)
     $logger.info("Attempting login by '#{username}'")
-    if not Rpam.auth(username,password, :service => "pcsd")
+    if not Rpam.auth(username, password, :service => "pcsd")
       $logger.info("Failed login by '#{username}' (bad username or password)")
       return nil
     end
-
-    stdout, stderr, retval = run_cmd("id", "-Gn", username)
-    if retval != 0
-      $logger.info("Failed login by '#{username}' (unable to determine groups user is a member of)")
-      return nil
-    end
-
-    if not stdout[0].match(/\bhaclient\b/)
-      $logger.info("Failed login by '#{username}' (user is not a member of haclient)")
-      return nil
-    end
-
-    $logger.info("Successful login by '#{username}'")
+    return nil if not isUserAllowedToLogin(username)
 
     if generate_token
       token = PCSAuth.uuid
       begin
-      	password_file = File.open($user_pass_file, File::RDWR|File::CREAT)
-	password_file.flock(File::LOCK_EX)
-	json = password_file.read()
-	users = JSON.parse(json)
-      rescue Exception => ex
-	$logger.info "Empty pcs_users.conf file, creating new file"
-	users = []
+        password_file = File.open($user_pass_file, File::RDWR|File::CREAT)
+        password_file.flock(File::LOCK_EX)
+        json = password_file.read()
+        users = JSON.parse(json)
+      rescue Exception
+        $logger.info "Empty pcs_users.conf file, creating new file"
+        users = []
       end
-      users << {"username" => username, "token" => token, "client" => request.ip, "creation_date" => Time.now}
+      users << {"username" => username, "token" => token, "creation_date" => Time.now}
       password_file.truncate(0)
       password_file.rewind
       password_file.write(JSON.pretty_generate(users))
@@ -57,6 +45,39 @@ class PCSAuth
     return true
   end
 
+  def self.getUsersGroups(username)
+    stdout, stderr, retval = run_cmd(
+      getSuperuserSession, "id", "-Gn", username
+    )
+    if retval != 0
+      $logger.info(
+        "Unable to determine groups of user '#{username}': #{stderr.join(' ').strip}"
+      )
+      return [false, []]
+    end
+    return [true, stdout.join(' ').split(nil)]
+  end
+
+  def self.isUserAllowedToLogin(username, log_success=true)
+    success, groups = getUsersGroups(username)
+    if not success
+      $logger.info(
+        "Failed login by '#{username}' (unable to determine user's groups)"
+      )
+      return false
+    end
+    if not groups.include?(ADMIN_GROUP)
+      $logger.info(
+        "Failed login by '#{username}' (user is not a member of #{ADMIN_GROUP})"
+      )
+      return false
+    end
+    if log_success
+      $logger.info("Successful login by '#{username}'")
+    end
+    return true
+  end
+
   def self.validToken(token)
     begin
       json = File.read($user_pass_file)
@@ -67,43 +88,79 @@ class PCSAuth
 
     users.each {|u|
       if u["token"] == token
-	return u["username"]
+        return u["username"]
       end
     }
     return false
   end
 
-  def self.isLoggedIn(session, cookies)
+  def self.loginByToken(session, cookies)
     if username = validToken(cookies["token"])
-      if username == "hacluster" and $cookies.key?(:CIB_user) and $cookies.key?(:CIB_user) != ""
-        $session[:username] = $cookies[:CIB_user]
+      if SUPERUSER == username
+        if cookies['CIB_user'] and cookies['CIB_user'].strip != ''
+          session[:username] = cookies['CIB_user']
+          if cookies['CIB_user_groups'] and cookies['CIB_user_groups'].strip != ''
+            session[:usergroups] = cookieUserDecode(
+              cookies['CIB_user_groups']
+            ).split(nil)
+          else
+            session[:usergroups] = []
+          end
+        else
+          session[:username] = SUPERUSER
+          session[:usergroups] = []
+        end
+        return true
+      else
+        session[:username] = username
+        success, groups = getUsersGroups(username)
+        session[:usergroups] = success ? groups : []
+        return true
       end
-      return true
-    else
-      return session[:username] != nil
     end
+    return false
   end
 
-  # Always an admin until we implement groups
-  def self.isAdmin(session)
-    true
+  def self.loginByPassword(session, username, password)
+    if validUser(username, password)
+      session[:username] = username
+      success, groups = getUsersGroups(username)
+      session[:usergroups] = success ? groups : []
+      return true
+    end
+    return false
   end
 
-  def self.createUser(username, password)
-    begin
-      json = File.read($user_pass_file)
-      users = JSON.parse(json)
-    rescue
-      users = []
+  def self.isLoggedIn(session)
+    username = session[:username]
+    if (username != nil) and isUserAllowedToLogin(username, false)
+      success, groups = getUsersGroups(username)
+      session[:usergroups] = success ? groups : []
+      return true
     end
+    return false
+  end
 
-    token = PCSAuth.uuid
+  def self.getSuperuserSession()
+    return {
+      :username => SUPERUSER,
+      :usergroups => [],
+    }
+  end
 
-    users.delete_if{|u| u["username"] == username}
-    users << {"username" => username, "password" => password, "token" => token}
-    File.open($user_pass_file, "w") do |f|
-      f.write(JSON.pretty_generate(users))
-    end
+  # Let's be safe about characters in cookie variables and do base64.
+  # We cannot do that for CIB_user, however, as it has to stay backward
+  # compatible; there we at least remove disallowed characters instead.
+  def self.cookieUserSafe(text)
+    return text.gsub(/[^!-~]/, '').gsub(';', '')
+  end
+
+  def self.cookieUserEncode(text)
+    return Base64.encode64(text).gsub("\n", '')
+  end
+
+  def self.cookieUserDecode(text)
+    return Base64.decode64(text)
   end
 end
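
The cookie helpers at the end of PCSAuth (cookieUserSafe, cookieUserEncode,
cookieUserDecode) can be restated in a few lines; the following is a hedged,
standalone Python mirror for illustration only (Python's b64encode never emits
newlines, unlike Ruby's Base64.encode64, so the newline stripping is implicit):

    # hypothetical Python mirror of the pcsd cookie sanitising helpers
    import base64
    import re

    def cookie_user_safe(text):
        # keep printable, non-space ASCII and drop ';' (the cookie separator)
        return re.sub(r"[^!-~]", "", text).replace(";", "")

    def cookie_user_encode(text):
        return base64.b64encode(text.encode("utf-8")).decode("ascii")

    def cookie_user_decode(text):
        return base64.b64decode(text).decode("utf-8")

    print(cookie_user_safe("ha cluster;user"))            # -> haclusteruser
    print(cookie_user_decode(cookie_user_encode("abc")))  # -> abc
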
 
diff --git a/pcsd/bootstrap.rb b/pcsd/bootstrap.rb
new file mode 100644
index 0000000..027e654
--- /dev/null
+++ b/pcsd/bootstrap.rb
@@ -0,0 +1,77 @@
+require 'logger'
+require 'pathname'
+require 'open4'
+
+require 'settings.rb'
+
+
+def is_rhel6()
+  # Checking corosync version works in most cases and supports non-rhel
+  # distributions as well as running (manually compiled) corosync2 on rhel6.
+  # - corosync2 does not support cman at all
+  # - corosync1 runs with cman on rhel6
+  # - corosync1 can be used without cman, but we don't support it anyways
+  # - corosync2 is the default result if errors occur
+  out = ''
+  status = Open4::popen4(COROSYNC, '-v') { |pid, stdin, stdout, stderr|
+    out = stdout.readlines()
+  }
+  retval = status.exitstatus
+  return false if retval != 0
+  match = /version\D+(\d+)/.match(out.join())
+  return (match and match[1] == "1")
+end
+
+def is_systemctl()
+  systemctl_paths = [
+      '/usr/bin/systemctl',
+      '/bin/systemctl',
+      '/var/run/systemd/system',
+  ]
+  systemctl_paths.each { |path|
+    return true if File.exist?(path)
+  }
+  return false
+end
+
+def get_pcs_path(pcsd_path)
+  real_path = Pathname.new(pcsd_path).realpath.to_s
+  if PCSD_EXEC_LOCATION == real_path or PCSD_EXEC_LOCATION == (real_path + '/')
+    return '/usr/sbin/pcs'
+  else
+    return '../pcs/pcs'
+  end
+end
+
+PCS_VERSION = '0.9.148'
+COROSYNC = COROSYNC_BINARIES + "corosync"
+ISRHEL6 = is_rhel6
+ISSYSTEMCTL = is_systemctl
+if ISRHEL6
+  COROSYNC_CMAPCTL = COROSYNC_BINARIES + "corosync-objctl"
+else
+  COROSYNC_CMAPCTL = COROSYNC_BINARIES + "corosync-cmapctl"
+end
+COROSYNC_QUORUMTOOL = COROSYNC_BINARIES + "corosync-quorumtool"
+
+if not defined? $cur_node_name
+  $cur_node_name = `hostname`.chomp
+end
+
+def configure_logger(log_device)
+  logger = Logger.new(log_device)
+  if ENV['PCSD_DEBUG'] and ENV['PCSD_DEBUG'].downcase == "true" then
+    logger.level = Logger::DEBUG
+    logger.info "PCSD Debugging enabled"
+  else
+    logger.level = Logger::INFO
+  end
+
+  if ISRHEL6
+    logger.debug "Detected RHEL 6"
+  else
+    logger.debug "Did not detect RHEL 6"
+  end
+  return logger
+end
+
diff --git a/pcsd/cfgsync.rb b/pcsd/cfgsync.rb
new file mode 100644
index 0000000..0cfb587
--- /dev/null
+++ b/pcsd/cfgsync.rb
@@ -0,0 +1,773 @@
+require 'fileutils'
+require 'rexml/document'
+require 'digest/sha1'
+
+require 'settings.rb'
+require 'config.rb'
+require 'corosyncconf.rb'
+require 'pcs.rb'
+require 'auth.rb'
+
+def token_file_path()
+  filename = ENV['PCS_TOKEN_FILE']
+  unless filename.nil?
+    return filename
+  end
+  if Process.uid == 0
+    return File.join(PCSD_VAR_LOCATION, 'tokens')
+  end
+  return File.expand_path('~/.pcs/tokens')
+end
+
+def settings_file_path()
+  current_dir = File.expand_path(File.dirname(__FILE__))
+  if PCSD_EXEC_LOCATION == current_dir or PCSD_EXEC_LOCATION == (current_dir + '/')
+    return File.join(PCSD_VAR_LOCATION, 'pcs_settings.conf')
+  else
+    return File.join(current_dir, 'pcs_settings.conf')
+  end
+end
+
+CFG_COROSYNC_CONF = "/etc/corosync/corosync.conf" unless defined? CFG_COROSYNC_CONF
+CFG_CLUSTER_CONF = "/etc/cluster/cluster.conf" unless defined? CFG_CLUSTER_CONF
+CFG_PCSD_SETTINGS = settings_file_path() unless defined? CFG_PCSD_SETTINGS
+CFG_PCSD_TOKENS = token_file_path() unless defined? CFG_PCSD_TOKENS
+
+CFG_SYNC_CONTROL = File.join(PCSD_VAR_LOCATION, 'cfgsync_ctl') unless defined? CFG_SYNC_CONTROL
+
+module Cfgsync
+  class Config
+    include Comparable
+
+    # set @name, @file_path, @file_perm in ancestors
+    class << self
+      attr_reader :name, :file_path, :file_perm
+    end
+
+    def self.from_text(text)
+      return self.new(text)
+    end
+
+    def self.from_file(default=nil)
+      begin
+        file = nil
+        file = File.open(@file_path, File::RDONLY)
+        file.flock(File::LOCK_SH)
+        return self.from_text(file.read())
+      rescue => e
+        $logger.warn(
+          "Cannot read config '#{@name}' from '#{@file_path}': #{e.message}"
+        )
+        return self.from_text(default) if default
+        raise
+      ensure
+        unless file.nil?
+          file.flock(File::LOCK_UN)
+          file.close()
+        end
+      end
+    end
+
+    def self.backup()
+      begin
+        FileUtils.cp(@file_path, @file_path + "." + Time.now.to_i.to_s)
+      rescue => e
+        $logger.debug("Exception when backing up config '#{self.name}': #{e}")
+        return
+      end
+      begin
+        self.remove_old_backups()
+      rescue => e
+        $logger.debug("Exception when removing old backup files: #{e}")
+      end
+    end
+
+    def self.remove_old_backups()
+      backup_files = []
+      Dir.glob(@file_path + '.*') { |path|
+        if File.file?(path)
+          match = path.match(/^#{@file_path}\.(\d+)$/)
+          if match
+            backup_files << [match[1].to_i(), path]
+          end
+        end
+      }
+      backup_count = ConfigSyncControl::file_backup_count()
+      to_delete = backup_files.sort()[0..-(backup_count + 1)]
+      return if not to_delete
+      to_delete.each { |timestamp, path|
+        File.delete(path)
+      }
+    end
+
+    def text()
+      return @text
+    end
+
+    def text=(text)
+      @text = text
+      self.clean_cache()
+      return self
+    end
+
+    def hash()
+      return @hash ||= self.get_hash()
+    end
+
+    def version()
+      return @version ||= self.get_version().to_i()
+    end
+
+    def version=(new_version)
+      self.text = self.set_version(new_version)
+      return self
+    end
+
+    def save()
+      begin
+        file = nil
+        file = File.open(self.class.file_path, 'w', self.class.file_perm)
+        file.flock(File::LOCK_EX)
+        file.write(self.text)
+        $logger.info(
+          "Saved config '#{self.class.name}' version #{self.version} #{self.hash} to '#{self.class.file_path}'"
+        )
+      rescue => e
+        $logger.error(
+          "Cannot save config '#{self.class.name}': #{e.message}"
+        )
+        raise
+      ensure
+        unless file.nil?
+          file.flock(File::LOCK_UN)
+          file.close()
+        end
+      end
+    end
+
+    def <=>(other)
+      if self.version == other.version
+        return self.hash <=> other.hash
+      else
+        return self.version <=> other.version
+      end
+    end
+
+    protected
+
+    def initialize(text)
+      self.text = text
+    end
+
+    def clean_cache()
+      @hash = nil
+      @version = nil
+      return self
+    end
+
+    def get_hash()
+      return Digest::SHA1.hexdigest(self.text)
+    end
+  end
+
+
+  class PcsdSettings < Config
+    @name = "pcs_settings.conf"
+    @file_path = ::CFG_PCSD_SETTINGS
+    @file_perm = 0644
+
+    protected
+
+    def get_version()
+      return PCSConfig.new(self.text).data_version
+    end
+
+    def set_version(new_version)
+      parsed = PCSConfig.new(self.text)
+      parsed.data_version = new_version
+      return parsed.text
+    end
+  end
+
+
+  class PcsdTokens < Config
+    @name = 'tokens'
+    @file_path = ::CFG_PCSD_TOKENS
+    @file_perm = 0600
+
+    def self.backup()
+    end
+
+    def save()
+      dirname = File.dirname(self.class.file_path)
+      if not ENV['PCS_TOKEN_FILE'] and not File.directory?(dirname)
+        FileUtils.mkdir_p(dirname, {:mode => 0700})
+      end
+      super
+    end
+
+    protected
+
+    def get_version()
+      return PCSTokens.new(self.text).data_version
+    end
+
+    def set_version(new_version)
+      parsed = PCSTokens.new(self.text)
+      parsed.data_version = new_version
+      return parsed.text
+    end
+  end
+
+
+  class ClusterConf < Config
+    @name = "cluster.conf"
+    @file_path = ::CFG_CLUSTER_CONF
+    @file_perm = 0644
+
+    protected
+
+    def get_version()
+      dom = REXML::Document.new(self.text)
+      if dom.root and dom.root.name == 'cluster'
+        return dom.root.attributes['config_version'].to_i
+      end
+      return 0
+    end
+
+    def set_version(new_version)
+      dom = REXML::Document.new(self.text)
+      if dom.root and dom.root.name == 'cluster'
+        dom.root.attributes['config_version'] = new_version
+      end
+      return dom.to_s
+    end
+  end
+
+
+  class CorosyncConf < Config
+    @name = "corosync.conf"
+    @file_path = ::CFG_COROSYNC_CONF
+    @file_perm = 0644
+
+    protected
+
+    def get_version()
+      parsed = ::CorosyncConf::parse_string(self.text)
+      # mimic corosync behavior - the last config_version found is used
+      version = nil
+      parsed.sections('totem').each { |totem|
+        totem.attributes('config_version').each { |attrib|
+          version = attrib[1].to_i
+        }
+      }
+      return version ? version : 0
+    end
+
+    def set_version(new_version)
+      parsed = ::CorosyncConf::parse_string(self.text)
+      parsed.sections('totem').each { |totem|
+        totem.set_attribute('config_version', new_version)
+      }
+      return parsed.text
+    end
+  end
+
+
+  class ConfigSyncControl
+    @thread_interval_default = 60
+    @thread_interval_minimum = 20
+    @file_backup_count_default = 50
+    @file_backup_count_minimum = 0
+
+    def self.sync_thread_allowed?()
+      data = self.load()
+      return !(
+        self.sync_thread_paused_data?(data)\
+        or\
+        self.sync_thread_disabled_data?(data)
+      )
+    end
+
+    def self.sync_thread_paused?()
+      return self.sync_thread_paused_data?(self.load())
+    end
+
+    def self.sync_thread_disabled?()
+      return self.sync_thread_disabled_data?(self.load())
+    end
+
+    def self.sync_thread_interval()
+      return self.get_integer_value(
+        self.load()['thread_interval'],
+        @thread_interval_default,
+        @thread_interval_minimum
+      )
+    end
+
+    def self.sync_thread_interval=(seconds)
+      data = self.load()
+      data['thread_interval'] = seconds
+      return self.save(data)
+    end
+
+    def self.sync_thread_pause(semaphore_cfgsync, seconds=300)
+      # wait for the thread to finish current run and disable it
+      semaphore_cfgsync.synchronize {
+        data = self.load()
+        data['thread_paused_until'] = Time.now.to_i() + seconds.to_i()
+        return self.save(data)
+      }
+    end
+
+    def self.sync_thread_resume()
+      data = self.load()
+      if data['thread_paused_until']
+        data.delete('thread_paused_until')
+        return self.save(data)
+      end
+      return true
+    end
+
+    def self.sync_thread_disable(semaphore_cfgsync)
+      # wait for the thread to finish current run and disable it
+      semaphore_cfgsync.synchronize {
+        data = self.load()
+        data['thread_disabled'] = true
+        return self.save(data)
+      }
+    end
+
+    def self.sync_thread_enable()
+      data = self.load()
+      if data['thread_disabled']
+        data.delete('thread_disabled')
+        return self.save(data)
+      end
+      return true
+    end
+
+    def self.file_backup_count()
+      return self.get_integer_value(
+        self.load()['file_backup_count'],
+        @file_backup_count_default,
+        @file_backup_count_minimum
+      )
+    end
+
+    def self.file_backup_count=(count)
+      data = self.load()
+      data['file_backup_count'] = count
+      return self.save(data)
+    end
+
+    protected
+
+    def self.sync_thread_paused_data?(data)
+      if data['thread_paused_until']
+        paused_until = data['thread_paused_until'].to_i()
+        return ((paused_until > 0) and (Time.now().to_i() < paused_until))
+      end
+      return false
+    end
+
+    def self.sync_thread_disabled_data?(data)
+      return data['thread_disabled']
+    end
+
+    def self.get_integer_value(value, default, minimum)
+      return default if value.nil?
+      if value.respond_to?(:match)
+        return default if not value.match(/\A\s*[+-]?\d+\Z/)
+      end
+      return default if not value.respond_to?(:to_i)
+      numeric = value.to_i()
+      return minimum if numeric < minimum
+      return numeric
+    end
+
+    def self.load()
+      begin
+        file = nil
+        file = File.open(CFG_SYNC_CONTROL, File::RDONLY)
+        file.flock(File::LOCK_SH)
+        return JSON.parse(file.read())
+      rescue => e
+        $logger.debug("Cannot read config '#{CFG_SYNC_CONTROL}': #{e.message}")
+        return {}
+      ensure
+        unless file.nil?
+          file.flock(File::LOCK_UN)
+          file.close()
+        end
+      end
+    end
+
+    def self.save(data)
+      text = JSON.pretty_generate(data)
+      begin
+        file = nil
+        file = File.open(CFG_SYNC_CONTROL, 'w', 0600)
+        file.flock(File::LOCK_EX)
+        file.write(text)
+      rescue => e
+        $logger.error("Cannot save config '#{CFG_SYNC_CONTROL}': #{e.message}")
+        return false
+      ensure
+        unless file.nil?
+          file.flock(File::LOCK_UN)
+          file.close()
+        end
+      end
+      return true
+    end
+  end
+
+
+  class ConfigPublisher
+    def initialize(session, configs, nodes, cluster_name, tokens={})
+      @configs = configs
+      @nodes = nodes
+      @cluster_name = cluster_name
+      @published_configs_names = @configs.collect { |cfg|
+        cfg.class.name
+      }
+      @additional_tokens = tokens
+      @session = session
+    end
+
+    def send(force=false)
+      nodes_txt = @nodes.join(', ')
+      @configs.each { |cfg|
+        $logger.info(
+          "Sending config '#{cfg.class.name}' version #{cfg.version} #{cfg.hash}"\
+          + " to nodes: #{nodes_txt}"
+        )
+      }
+
+      data = self.prepare_request_data(@configs, @cluster_name, force)
+      node_response = {}
+      threads = []
+      @nodes.each { |node|
+        threads << Thread.new {
+          code, out = send_request_with_token(
+            @session, node, 'set_configs', true, data, true, nil, 30,
+            @additional_tokens
+          )
+          if 200 == code
+            begin
+              node_response[node] = JSON.parse(out)
+            rescue JSON::ParserError
+            end
+          elsif 404 == code
+            node_response[node] = {'status' => 'not_supported'}
+          else
+            begin
+              response = JSON.parse(out)
+              if true == response['notauthorized'] or true == response['notoken']
+                node_response[node] = {'status' => 'notauthorized'}
+              end
+            rescue JSON::ParserError
+            end
+          end
+          if not node_response.key?(node)
+            node_response[node] = {'status' => 'error'}
+          end
+          # old pcsd returns this instead of 404 if pacemaker isn't running there
+          if node_response[node]['pacemaker_not_running']
+            node_response[node] = {'status' => 'not_supported'}
+          end
+        }
+      }
+      threads.each { |t| t.join }
+
+      node_response.each { |node, response|
+        $logger.info("Sending config response from #{node}: #{response}")
+      }
+
+      return node_response
+    end
+
+    def publish()
+      @configs.each { |cfg|
+        cfg.version += 1
+      }
+      node_response = self.send()
+      return [
+        self.get_old_local_configs(node_response, @published_configs_names),
+        node_response
+      ]
+    end
+
+    protected
+
+    def prepare_request_data(configs, cluster_name, force)
+      data = {
+        'configs' => {},
+      }
+      data['cluster_name'] = cluster_name if cluster_name
+      configs.each { |cfg|
+        data['configs'][cfg.class.name] = {
+          'type' => 'file',
+          'text' => cfg.text,
+        }
+      }
+      data['force'] = true if force
+      return {
+        'configs' => JSON.generate(data)
+      }
+    end
+
+    def get_old_local_configs(node_response, published_configs_names)
+      old_local_configs = []
+      node_response.each { |node, response|
+        if 'ok' == response['status'] and response['result']
+          response['result'].each { |cfg_name, status|
+            if 'rejected' == status and published_configs_names.include?(cfg_name)
+              old_local_configs << cfg_name
+            end
+          }
+        end
+      }
+      return old_local_configs.uniq
+    end
+  end
+
+
+  class ConfigFetcher
+    def initialize(session, config_classes, nodes, cluster_name)
+      @config_classes = config_classes
+      @nodes = nodes
+      @cluster_name = cluster_name
+      @session = session
+    end
+
+    def fetch_all()
+      return self.filter_configs_cluster(
+        self.get_configs_cluster(@nodes, @cluster_name),
+        @config_classes
+      )
+    end
+
+    def fetch()
+      configs_cluster = self.fetch_all()
+
+      newest_configs_cluster = {}
+      configs_cluster.each { |name, cfgs|
+        newest_configs_cluster[name] = self.find_newest_config(cfgs)
+      }
+      configs_local = self.get_configs_local()
+
+      to_update_locally = []
+      to_update_in_cluster = []
+      configs_local.each { |name, local_cfg|
+        if newest_configs_cluster.key?(name)
+          if newest_configs_cluster[name].version > local_cfg.version
+            to_update_locally << newest_configs_cluster[name]
+          elsif newest_configs_cluster[name].version < local_cfg.version
+            to_update_in_cluster << local_cfg
+          elsif newest_configs_cluster[name].hash != local_cfg.hash
+            to_update_locally << newest_configs_cluster[name]
+          end
+        end
+      }
+      return to_update_locally, to_update_in_cluster
+    end
+
+    protected
+
+    def get_configs_local()
+      return Cfgsync::get_configs_local(true)
+    end
+
+    def get_configs_cluster(nodes, cluster_name)
+      data = {
+        'cluster_name' => cluster_name,
+      }
+
+      $logger.debug 'Fetching configs from the cluster'
+      threads = []
+      node_configs = {}
+      nodes.each { |node|
+        threads << Thread.new {
+          code, out = send_request_with_token(
+            @session, node, 'get_configs', false, data
+          )
+          if 200 == code
+            begin
+              parsed = JSON::parse(out)
+              if 'ok' == parsed['status'] and cluster_name == parsed['cluster_name']
+                node_configs[node], _ = Cfgsync::sync_msg_to_configs(parsed)
+              end
+            rescue JSON::ParserError
+            end
+          end
+        }
+      }
+      threads.each { |t| t.join }
+      return node_configs
+    end
+
+    def filter_configs_cluster(node_configs, wanted_configs_classes)
+      configs = {}
+      node_configs.each { |node, cfg_map|
+        cfg_map.each { |name, cfg|
+          if wanted_configs_classes.include?(cfg.class)
+            configs[cfg.class.name] = configs[cfg.class.name] || []
+            configs[cfg.class.name] << cfg
+          end
+        }
+      }
+      return configs
+    end
+
+    def find_newest_config(config_list)
+      newest_version = config_list.collect { |cfg| cfg.version }.max
+      hash_config = {}
+      hash_count = {}
+      config_list.each { |cfg|
+        if cfg.version == newest_version
+          hash_config[cfg.hash] = cfg
+          if hash_count.key?(cfg.hash)
+            hash_count[cfg.hash] += 1
+          else
+            hash_count[cfg.hash] = 1
+          end
+        end
+      }
+      most_frequent_hash_count = hash_count.max_by { |hash, count| count }[1]
+      most_frequent_hashes = hash_count.reject { |hash, count|
+        count != most_frequent_hash_count
+      }
+      return hash_config[most_frequent_hashes.keys.max]
+    end
+  end
+
+
+  def self.cluster_cfg_class()
+    return ISRHEL6 ? ClusterConf : CorosyncConf
+  end
+
+  def self.get_cfg_classes()
+    return [PcsdSettings, PcsdTokens]
+    # return [PcsdSettings, self.cluster_cfg_class]
+  end
+
+  def self.get_cfg_classes_by_name()
+    classes = {}
+    self.get_cfg_classes.each { |cfgclass|
+      classes[cfgclass.name] = cfgclass
+    }
+    return classes
+  end
+
+  def self.sync_msg_to_configs(sync_msg)
+    cfg_classes = self.get_cfg_classes_by_name
+    configs = {}
+    unknown_config_names = []
+    sync_msg['configs'].each { |name, data|
+      if cfg_classes[name]
+        if 'file' == data['type'] and data['text']
+          configs[name] = cfg_classes[name].from_text(data['text'])
+        end
+      else
+        unknown_config_names << name
+      end
+    }
+    return configs, unknown_config_names
+  end
+
+  def self.get_configs_local(with_missing=false)
+    default = with_missing ? '' : nil
+    configs = {}
+    self.get_cfg_classes.each { |cfg_class|
+      begin
+        configs[cfg_class.name] = cfg_class.from_file(default)
+      rescue
+      end
+    }
+    return configs
+  end
+
+  # save and sync updated config
+  # return true on success, false on version conflict
+  def self.save_sync_new_version(config, nodes, cluster_name, fetch_on_conflict, tokens={})
+    if not cluster_name or cluster_name.empty?
+      # we run on a standalone host, no config syncing
+      config.version += 1
+      config.save()
+      return true, {}
+    else
+      # we run in a cluster so we need to sync the config
+      publisher = ConfigPublisher.new(
+        PCSAuth.getSuperuserSession(), [config], nodes, cluster_name, tokens
+      )
+      old_configs, node_responses = publisher.publish()
+      if old_configs.include?(config.class.name)
+        if fetch_on_conflict
+          fetcher = ConfigFetcher.new(
+            PCSAuth.getSuperuserSession(), [config.class], nodes, cluster_name
+          )
+          cfgs_to_save, _ = fetcher.fetch()
+          cfgs_to_save.each { |cfg_to_save|
+            cfg_to_save.save() if cfg_to_save.class == config.class
+          }
+        end
+        return false, node_responses
+      end
+      return true, node_responses
+    end
+  end
+
+  def self.merge_tokens_files(orig_cfg, to_merge_cfgs, new_tokens)
+    # Merge tokens files, use only newer tokens files, keep the most recent
+    # tokens, make sure new_tokens are included.
+    max_version = orig_cfg.version
+    with_new_tokens = PCSTokens.new(orig_cfg.text)
+    if to_merge_cfgs
+      to_merge_cfgs.reject! { |item| item.version <= orig_cfg.version }
+      if to_merge_cfgs.length > 0
+        to_merge_cfgs.sort.each { |ft|
+          with_new_tokens.tokens.update(PCSTokens.new(ft.text).tokens)
+        }
+        max_version = [to_merge_cfgs.max.version, max_version].max
+      end
+    end
+    with_new_tokens.tokens.update(new_tokens)
+    config_new = PcsdTokens.from_text(with_new_tokens.text)
+    config_new.version = max_version
+    return config_new
+  end
+
+  def self.save_sync_new_tokens(config, new_tokens, nodes, cluster_name)
+    with_new_tokens = PCSTokens.new(config.text)
+    with_new_tokens.tokens.update(new_tokens)
+    config_new = PcsdTokens.from_text(with_new_tokens.text)
+    if not cluster_name or cluster_name.empty?
+      # we run on a standalone host, no config syncing
+      config_new.version += 1
+      config_new.save()
+      return true, {}
+    end
+    # we run in a cluster so we need to sync the config
+    publisher = ConfigPublisher.new(
+      PCSAuth.getSuperuserSession(), [config_new], nodes, cluster_name,
+      new_tokens
+    )
+    old_configs, node_responses = publisher.publish()
+    if not old_configs.include?(config_new.class.name)
+      # no node had newer tokens file, we are ok, everything done
+      return true, node_responses
+    end
+    # get tokens from all nodes and merge them
+    fetcher = ConfigFetcher.new(
+      PCSAuth.getSuperuserSession(), [config_new.class], nodes, cluster_name
+    )
+    fetched_tokens = fetcher.fetch_all()[config_new.class.name]
+    config_new = Cfgsync::merge_tokens_files(config, fetched_tokens, new_tokens)
+    # and try to publish again
+    return Cfgsync::save_sync_new_version(
+      config_new, nodes, cluster_name, true, new_tokens
+    )
+  end
+end
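
ConfigFetcher.find_newest_config above resolves conflicting copies by taking
the highest version and, within it, the hash reported by the most nodes, with
ties broken by the greatest hash. A short standalone Python sketch of that
selection rule, assuming configs is a hypothetical list of (version, sha1_hex)
pairs:

    # hypothetical sketch of the find_newest_config selection rule
    from collections import Counter

    def pick_newest(configs):
        newest = max(version for version, _ in configs)
        counts = Counter(h for version, h in configs if version == newest)
        top = max(counts.values())
        # among the most frequent hashes of the newest version, the greatest wins
        winner = max(h for h, c in counts.items() if c == top)
        return newest, winner

    print(pick_newest([(3, "aa"), (3, "bb"), (3, "bb"), (2, "ff")]))  # (3, 'bb')
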
diff --git a/pcsd/cluster.rb b/pcsd/cluster.rb
index 91c36dd..8d59dc9 100644
--- a/pcsd/cluster.rb
+++ b/pcsd/cluster.rb
@@ -1,9 +1,23 @@
-class Cluster 
-  attr_accessor :id, :name, :nodes, :num_nodes
-  def initialize(name, nodes)
+class Cluster
+  attr_accessor :name
+  attr_reader :nodes
+
+  def initialize(name, node_list)
     @name = name
-    @nodes = nodes
-    @num_nodes = nodes.length
+    self.nodes = node_list
+  end
+
+  def nodes=(node_list)
+    @nodes = []
+    node_list.each { |n|
+      @nodes << n if n.is_a?(String)
+    }
+    @nodes = @nodes.uniq.sort
+    return self
+  end
+
+  def num_nodes
+    @nodes.length
   end
 
   def ui_address
diff --git a/pcsd/cluster_entity.rb b/pcsd/cluster_entity.rb
new file mode 100644
index 0000000..41dfff7
--- /dev/null
+++ b/pcsd/cluster_entity.rb
@@ -0,0 +1,1051 @@
+require 'resource.rb'
+require 'pcs.rb'
+
+module ClusterEntity
+
+  def self.get_rsc_status(crm_dom)
+    unless crm_dom
+      return {}
+    end
+    status = {}
+    crm_dom.elements.each('/crm_mon/resources//resource') { |e|
+      rsc_id = e.attributes['id'].split(':')[0]
+      status[rsc_id] ||= []
+      status[rsc_id] << ClusterEntity::CRMResourceStatus.new(e)
+    }
+    return status
+  end
+
+  def self.get_resources_operations(cib_dom)
+    unless cib_dom
+      return {}
+    end
+    operations = {}
+    cib_dom.elements.each(
+      '/cib/status/node_state/lrm/lrm_resources/lrm_resource/lrm_rsc_op'
+    ) { |e|
+      rsc_id = e.parent.attributes['id'].split(':')[0]
+      operations[rsc_id] ||= []
+      operations[rsc_id] << ClusterEntity::ResourceOperation.new(e)
+    }
+    return operations
+  end
+
+  def self.obj_to_hash(obj, variables=nil)
+    unless variables
+      variables = obj.instance_variables
+    end
+    hash = {}
+    variables.each do |var|
+      hash[var.to_s[1..-1].to_sym] = obj.instance_variable_get var if obj.instance_variable_defined? var
+    end
+    hash
+  end
+
+  def self.get_meta_attr_version1(obj)
+    meta_attr = []
+    obj.meta_attr.each { |pair|
+      meta_attr << {
+        :key => pair.name,
+        :value => pair.value,
+        :id => pair.id,
+        :parent => obj.id
+      }
+    }
+    return meta_attr
+  end
+
+  def self.merge_meta_attr_version1(meta1, meta2)
+    to_add = []
+    meta2_keys = meta2.map { |x| x[:key]}
+    meta1.each { |m1|
+      unless meta2_keys.include? m1[:key]
+        to_add << m1
+      end
+    }
+    return meta2 + to_add
+  end
+
+  def self.get_meta_attr_from_status_v1(resource_id, meta_attr)
+    new_ma = ClusterEntity::NvSet.new
+    meta_attr.each { |v|
+      if v[:parent] == resource_id
+        new_ma << ClusterEntity::NvPair.new(v[:id], v[:key], v[:value])
+      end
+    }
+    return new_ma
+  end
+
+  def self.get_primitives_from_status_v1(resource_list)
+    primitives = {}
+    resource_list.each { |resource|
+      unless primitives.include?(resource[:id].to_sym)
+        p = ClusterEntity::Primitive.new
+        p.id = resource[:id]
+        p.agentname = resource[:agentname]
+        p.stonith = resource[:stonith]
+        if p.stonith
+          p._class = 'stonith'
+          p.type = p.agentname.split(':', 2)[1]
+        else
+          s =  p.agentname.split('::', 2)
+          p._class = s[0]
+          s = s[1].split(':', 2)
+          p.provider = s[0]
+          p.type = s[1]
+        end
+
+        p.meta_attr = ClusterEntity::get_meta_attr_from_status_v1(
+          p.id,
+          resource[:meta_attr]
+        )
+
+        resource[:instance_attr].each { |k, v|
+          p.instance_attr << ClusterEntity::NvPair.new(nil, k, v)
+        }
+
+        primitives[p.id.to_sym] = [
+          p,
+          {
+            :group => resource[:group],
+            :clone => resource[:clone_id],
+            :master => resource[:ms_id],
+            :meta_attr => resource[:meta_attr]
+          }
+        ]
+      end
+
+      primitive_struct = primitives[resource[:id].to_sym]
+      primitive = primitive_struct[0]
+      status = ClusterEntity::CRMResourceStatus.new
+      status.id = primitive.id
+      status.resource_agent = primitive.agentname
+      status.managed = false
+      status.failed = resource[:failed]
+      status.role = nil
+      status.active = resource[:active]
+      status.orphaned = resource[:orphaned]
+      status.failure_ignored = false
+      status.nodes_running_on = resource[:nodes].length
+      status.pending = nil
+      if status.nodes_running_on > 0
+        node = {
+          :id => nil,
+          :name => resource[:nodes][0],
+          :cached => false
+        }
+      else
+        node = nil
+      end
+      status.node = node
+      primitive.crm_status << status
+    }
+    primitives.each {|_, resource|
+      resource[0].update_status
+    }
+    return primitives
+  end
+
+  def self.make_resources_tree(primitives)
+    not_primitives = {}
+    tree = []
+    primitives.each { |_, primitive_struct|
+      p = primitive_struct[0]
+      data = primitive_struct[1]
+      unless data[:group] or data[:clone] or data[:master]
+        tree << p
+        next
+      end
+      group = nil
+      if data[:group]
+        if data[:clone] or data[:master]
+          group_id = data[:group].split('/', 2)[1]
+        else
+          group_id = data[:group]
+        end
+        if not_primitives.include?(group_id.to_sym)
+          group = not_primitives[group_id.to_sym]
+        else
+          group = ClusterEntity::Group.new
+          group.id = group_id
+          group.meta_attr = ClusterEntity::get_meta_attr_from_status_v1(
+            group.id,
+            data[:meta_attr]
+          )
+          not_primitives[group_id.to_sym] = group
+          unless data[:clone] or data[:master]
+            tree << group
+          end
+        end
+        p.parent = group
+        group.members << p
+      end
+      if data[:clone] or data[:master]
+        if data[:group]
+          mi_id = data[:group].split('/', 2)[0]
+        else
+          mi_id = (data[:clone] or data[:master])
+        end
+        unless not_primitives.include?(mi_id.to_sym)
+          if data[:clone]
+            mi = ClusterEntity::Clone.new
+          else
+            mi = ClusterEntity::MasterSlave.new
+            mi.masters_unknown = true
+          end
+          mi.id = mi_id
+          mi.meta_attr = ClusterEntity::get_meta_attr_from_status_v1(
+            mi_id,
+            data[:meta_attr]
+          )
+          if group
+            group.parent = mi
+            mi.member = group
+          else
+            p.parent = mi
+            mi.member = p
+          end
+          not_primitives[mi_id.to_sym] = mi
+          tree << mi
+        end
+      end
+    }
+    tree.each {|resource|
+      resource.update_status
+    }
+    return tree
+  end
+
+  class JSONable
+    def to_status(version='1')
+      ClusterEntity::obj_to_hash(self)
+    end
+    def to_json(version='1')
+      JSON.generate(to_status(version))
+    end
+  end
+
+  class NvPair < JSONable
+    attr_accessor :id, :name, :value
+
+    def initialize(id, name, value=nil)
+      @id = id
+      @name = name
+      @value = value
+    end
+
+    def self.from_dom(nvpair_dom_element)
+      return NvPair.new(
+        nvpair_dom_element.attributes['id'],
+        nvpair_dom_element.attributes['name'],
+        nvpair_dom_element.attributes['value']
+      )
+    end
+  end
+
+  class NvSet < JSONable
+    include Enumerable
+
+    def initialize
+      @list = []
+    end
+
+    def include?(name)
+      @list.each { |pair|
+        return true if pair.name == name
+      }
+      return false
+    end
+
+    def [](name)
+      @list.each { |pair|
+        return pair if pair.name == name
+      }
+      return nil
+    end
+
+    def <<(pair)
+      unless pair.instance_of?(ClusterEntity::NvPair)
+        raise ArgumentError.new('Argument has to be NvPair')
+      end
+      p = self[pair.name]
+      if p
+        @list.delete(p)
+      end
+      @list << pair
+      return self
+    end
+
+    def each(&block)
+      return enum_for(__method__) if block.nil?
+      @list.each do |ob|
+        block.call(ob)
+      end
+    end
+
+    def empty?
+      @list.empty?
+    end
+
+    def length
+      @list.length
+    end
+
+    def delete(name)
+      @list.delete(self[name])
+    end
+
+    def to_status(version='1')
+      status = []
+      @list.each { |pair|
+        status << pair.to_status(version)
+      }
+      return status
+    end
+  end
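+  # A short usage sketch of NvSet (names are illustrative): adding a pair
+  # whose name is already present replaces the old pair instead of
+  # appending a duplicate.
+  #   attrs = ClusterEntity::NvSet.new
+  #   attrs << ClusterEntity::NvPair.new(nil, 'target-role', 'Stopped')
+  #   attrs << ClusterEntity::NvPair.new(nil, 'target-role', 'Started')
+  #   attrs.length                # => 1
+  #   attrs['target-role'].value  # => 'Started'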
+
+  class ResourceStatus
+    include Comparable
+    attr_reader :status
+
+    STATUSES = {
+      :running => {
+        :val => 1,
+        :str => 'running'
+      },
+      :partially_running => {
+        :val => 2,
+        :str => 'partially running'
+      },
+      :disabled => {
+        :val => 3,
+        :str => 'disabled'
+      },
+      :failed => {
+        :val => 4,
+        :str => 'failed'
+      },
+      :blocked => {
+        :val => 5,
+        :str => 'blocked'
+      },
+      :unknown => {
+        :val => 6,
+        :str => 'unknown'
+      }
+    }
+
+    def initialize(status=:unknown)
+      @status = STATUSES.include?(status) ? status : :unknown
+    end
+
+    def to_s
+      STATUSES[@status][:str]
+    end
+
+    def <=>(other)
+      STATUSES[@status][:val] <=> STATUSES[other.status][:val]
+    end
+  end
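+  # ResourceStatus compares by severity through Comparable, so for example
+  # (illustrative):
+  #   failed = ClusterEntity::ResourceStatus.new(:failed)
+  #   running = ClusterEntity::ResourceStatus.new(:running)
+  #   failed > running  # => true
+  #   failed.to_s       # => 'failed'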
+
+
+  class Resource < JSONable
+    attr_accessor :parent, :meta_attr, :id, :error_list, :warning_list,
+                  :status
+    attr_reader :class_type
+
+    def initialize(resource_cib_element=nil, crm_dom=nil, parent=nil)
+      @class_type = nil
+      @parent = parent
+      @meta_attr = ClusterEntity::NvSet.new
+      @id = nil
+      @error_list = []
+      @warning_list = []
+      @status = ClusterEntity::ResourceStatus.new
+      element_names = {
+        'ClusterEntity::Primitive'.to_sym => 'primitive',
+        'ClusterEntity::Group'.to_sym => 'group',
+        'ClusterEntity::Clone'.to_sym => 'clone',
+        'ClusterEntity::MasterSlave'.to_sym => 'master'
+      }
+      if (resource_cib_element and
+        resource_cib_element.name == element_names[self.class.name.to_sym]
+      )
+        @id = resource_cib_element.attributes['id']
+        resource_cib_element.elements.each('meta_attributes/nvpair') { |e|
+          @meta_attr << ClusterEntity::NvPair.from_dom(e)
+        }
+      end
+    end
+
+    def disabled?
+      return true if @parent and @parent.disabled?
+      return !!(@meta_attr['target-role'] and
+        @meta_attr['target-role'].value.downcase == 'stopped'
+      )
+    end
+
+    def get_group
+      if parent.instance_of?(ClusterEntity::Group)
+        return parent.id
+      end
+      return nil
+    end
+
+    def get_clone
+      if parent.instance_of?(ClusterEntity::Clone)
+        return parent.id
+      elsif (parent.instance_of?(ClusterEntity::Group) and
+        parent.parent.instance_of?(ClusterEntity::Clone)
+      )
+        return parent.parent.id
+      end
+      return nil
+    end
+
+    def get_master
+      if parent.instance_of?(ClusterEntity::MasterSlave)
+        return parent.id
+      elsif (parent.instance_of?(ClusterEntity::Group) and
+        parent.parent.instance_of?(ClusterEntity::MasterSlave)
+      )
+        return parent.parent.id
+      end
+      return nil
+    end
+
+    def to_status(version='1')
+      if version == '2'
+        status = ClusterEntity::obj_to_hash(
+          self,
+          [:@id, :@error_list, :@warning_list, :@class_type]
+        )
+        status.update(
+          {
+            :status => @status.to_s,
+            :meta_attr => @meta_attr.to_status,
+            :parent_id => @parent ? @parent.id : nil,
+            :disabled => disabled?
+          }
+        )
+      else
+        status = ClusterEntity::obj_to_hash(self, [:@id])
+      end
+      return status
+    end
+
+    def get_map
+      return {@id.to_sym => self}
+    end
+  end
+
+
+  class CRMResourceStatus < JSONable
+    attr_accessor :id, :resource_agent, :managed, :failed, :role, :active,
+                  :orphaned, :failure_ignored, :nodes_running_on, :pending,
+                  :node
+
+    def initialize(resource_crm_element=nil)
+      @id = nil
+      @resource_agent = nil
+      @managed = false
+      @failed = false
+      @role = nil
+      @active = false
+      @orphaned = false
+      @failure_ignored = false
+      @nodes_running_on = 0
+      @pending = nil
+      @node = nil
+
+      if resource_crm_element and resource_crm_element.name == 'resource'
+        crm = resource_crm_element
+        @id = crm.attributes['id']
+        @resource_agent = crm.attributes['resource_agent']
+        @managed = crm.attributes['managed'] == 'true'
+        @failed = crm.attributes['failed'] == 'true'
+        @role = crm.attributes['role']
+        @active = crm.attributes['active'] == 'true'
+        @orphaned = crm.attributes['orphaned'] == 'true'
+        @failure_ignored = crm.attributes['failure_ignored'] == 'true'
+        @nodes_running_on = crm.attributes['nodes_running_on'].to_i
+        @pending = crm.attributes['pending']
+        node = crm.elements['node']
+        if node
+          @node = {
+            :name => node.attributes['name'],
+            :id => node.attributes['id'],
+            :cached => node.attributes['cached'] == 'true'
+          }
+        end
+      end
+    end
+  end
+
+
+  class Primitive < Resource
+    attr_accessor :agentname, :_class, :provider, :type, :stonith,
+                  :instance_attr, :crm_status, :operations, :utilization
+
+    def initialize(primitive_cib_element=nil, rsc_status=nil, parent=nil,
+        operations=nil)
+      super(primitive_cib_element, nil, parent)
+      @class_type = 'primitive'
+      @agentname = nil
+      @_class = nil
+      @provider = nil
+      @type = nil
+      @stonith = false
+      @instance_attr = ClusterEntity::NvSet.new
+      @crm_status = []
+      @operations = []
+      @utilization = ClusterEntity::NvSet.new
+      cib = primitive_cib_element
+      if primitive_cib_element and primitive_cib_element.name == 'primitive'
+        @_class = cib.attributes['class']
+        @provider = cib.attributes['provider']
+        @type = cib.attributes['type']
+        @agentname = ("#{@_class}%s:#{@type}" % [
+          @provider ? "::#{@provider}" : ''
+        ]) if @_class and @type
+
+        cib.elements.each('instance_attributes/nvpair') { |e|
+          @instance_attr << ClusterEntity::NvPair.from_dom(e)
+        }
+        cib.elements.each('utilization/nvpair') { |e|
+          @utilization << ClusterEntity::NvPair.from_dom(e)
+        }
+        @stonith = @_class == 'stonith'
+        if @id and rsc_status
+          @crm_status = rsc_status[@id] || []
+        end
+
+        @status = get_status
+        load_operations(operations)
+      end
+    end
+
+    def update_status
+      @status = get_status
+    end
+
+    def get_status
+      running = 0
+      failed = 0
+      @crm_status.each do |s|
+        if s.active
+          running += 1
+        elsif s.failed
+          failed += 1
+        end
+      end
+
+      if disabled?
+        status = ClusterEntity::ResourceStatus.new(:disabled)
+      elsif running > 0
+        status = ClusterEntity::ResourceStatus.new(:running)
+      elsif failed > 0 or @error_list.length > 0
+        status = ClusterEntity::ResourceStatus.new(:failed)
+      else
+        status = ClusterEntity::ResourceStatus.new(:blocked)
+      end
+
+      return status
+    end
+
+    def load_operations(operations)
+      @operations = []
+      unless operations and @id and operations[@id]
+        return
+      end
+
+      failed_ops = []
+      message_list = []
+      operations[@id].each { |o|
+        @operations << o
+        if o.rc_code != 0
+          # 7 == OCF_NOT_RUNNING == The resource is safely stopped.
+          next if o.operation == 'monitor' and o.rc_code == 7
+          # 8 == OCF_RUNNING_MASTER == The resource is running in master mode.
+          next if 8 == o.rc_code
+          failed_ops << o
+          message = "Failed to #{o.operation} #{@id}"
+          message += " on #{Time.at(o.last_rc_change).asctime}"
+          message += " on node #{o.on_node}" if o.on_node
+          message += ": #{o.exit_reason}" if o.exit_reason
+          message_list << {
+            :message => message
+          }
+        end
+      }
+
+      status = get_status
+      if (failed_ops.length > 0 and
+        status == ClusterEntity::ResourceStatus.new(:blocked)
+      )
+        @status = ClusterEntity::ResourceStatus.new(:failed)
+      end
+
+      if @status == ClusterEntity::ResourceStatus.new(:failed)
+        @error_list += message_list
+      else
+        @warning_list += message_list
+      end
+    end
+
+    def disabled?
+      if @stonith
+        return false
+      end
+      return super
+    end
+
+    def to_status(version='1')
+      hash = super(version)
+      if version == '2'
+        hash.update(
+          ClusterEntity::obj_to_hash(
+            self,
+            [:@agentname, :@provider, :@type, :@stonith]
+          )
+        )
+        hash[:utilization] = @utilization.to_status
+        hash[:instance_attr] = @instance_attr.to_status
+        hash[:class] = @_class
+
+        rsc_status = []
+        @crm_status.each { |s|
+          rsc_status << s.to_status(version)
+        }
+        hash[:crm_status] = rsc_status
+
+        operations = []
+        @operations.each { |o|
+          operations << o.to_status(version)
+        }
+        hash[:operations] = operations
+      else
+        instance_attr = {}
+        @instance_attr.each { |v|
+          instance_attr[v.name.to_sym] = v.value
+        }
+        hash.update(
+          {
+            :agentname => @agentname,
+            :group => nil,
+            :clone => false,
+            :clone_id => nil,
+            :ms => false,
+            :ms_id => nil,
+            :operations => [],
+            :meta_attr => ClusterEntity::get_meta_attr_version1(self),
+            :instance_attr => instance_attr,
+            :options => instance_attr,
+            :stonith => @stonith,
+            :disabled => disabled?,
+            :active => false,
+            :failed => false,
+            :orphaned => false,
+            :nodes => [],
+          }
+        )
+        if @crm_status and @crm_status.length >= 1
+          rsc = hash
+          hash = []
+          @crm_status.each do |s|
+            actual = {}
+            actual.update(rsc)
+            actual.update(
+              ClusterEntity::obj_to_hash(s, [:@active, :@failed, :@orphaned])
+            )
+            actual[:nodes] = (s.node) ? [s.node[:name]] : []
+            hash << actual
+          end
+        else
+          hash.update(
+            ClusterEntity::obj_to_hash(
+              CRMResourceStatus.new,
+              [:@active, :@failed, :@orphaned]
+            )
+          )
+          hash = [hash]
+        end
+      end
+      return hash
+    end
+  end
+
+
+  class Group < Resource
+    attr_accessor :members
+
+    def initialize(
+      group_cib_element=nil, rsc_status=nil, parent=nil, operations=nil
+    )
+      super(group_cib_element, nil, parent)
+      @class_type = 'group'
+      @members = []
+      if group_cib_element and group_cib_element.name == 'group'
+        @status = ClusterEntity::ResourceStatus.new(:running)
+        group_cib_element.elements.each('primitive') { |e|
+          p = Primitive.new(e, rsc_status, self, operations)
+          members << p
+        }
+        update_status
+      end
+    end
+
+    def update_status
+      @status = ClusterEntity::ResourceStatus.new(:running)
+      first = true
+      @members.each { |p|
+        p.update_status
+        if first
+          first = false
+          next
+        end
+        if (
+          p.status == ClusterEntity::ResourceStatus.new(:disabled) or
+          p.status == ClusterEntity::ResourceStatus.new(:blocked) or
+          p.status == ClusterEntity::ResourceStatus.new(:failed)
+        )
+          @status = ClusterEntity::ResourceStatus.new(:partially_running)
+        end
+      }
+      if (@members and @members.length > 0 and
+        (ClusterEntity::ResourceStatus.new(:running) != @members[0].status and
+        ClusterEntity::ResourceStatus.new(:unknown) != @members[0].status)
+      )
+        @status = @members[0].status
+      end
+      if disabled?
+        @status = ClusterEntity::ResourceStatus.new(:disabled)
+      end
+    end
+
+    def to_status(version='1')
+      if version == '2'
+        hash = super(version)
+        members = []
+        @members.each do |m|
+          members << m.to_status(version)
+        end
+        hash[:members] = members
+      else
+        hash = []
+        meta_attr = ClusterEntity::get_meta_attr_version1(self)
+        @members.each do |m|
+          hash.concat(m.to_status(version))
+        end
+        group_id = (@parent) ? "#{@parent.id}/#{@id}" : @id
+        hash.each do |m|
+          m[:group] = group_id
+          m[:meta_attr] = ClusterEntity::merge_meta_attr_version1(
+            m[:meta_attr],
+            meta_attr
+          )
+        end
+      end
+      return hash
+    end
+
+    def get_map
+      map = super
+      @members.each do |m|
+        map.update(m.get_map)
+      end
+      return map
+    end
+  end
+
+
+  class MultiInstance < Resource
+    attr_accessor :member, :unique, :managed, :failed, :failure_ignored
+
+    def initialize(resource_cib_element=nil, crm_dom=nil, rsc_status=nil,
+                   parent=nil, operations=nil)
+      super(resource_cib_element, nil, parent)
+      @member = nil
+      @multi_state = false
+      @unique = false
+      @managed = false
+      @failed = false
+      @failure_ignored = false
+      element_names = {
+        'ClusterEntity::Clone'.to_sym => 'clone',
+        'ClusterEntity::MasterSlave'.to_sym => 'master'
+      }
+      if (resource_cib_element and
+        resource_cib_element.name == element_names[self.class.name.to_sym]
+      )
+        member = resource_cib_element.elements['group | primitive']
+        if member and member.name == 'group'
+          @member = Group.new(member, rsc_status, self, operations)
+        elsif member and member.name == 'primitive'
+          @member = Primitive.new(member, rsc_status, self, operations)
+        end
+        update_status
+        if crm_dom
+          status = crm_dom.elements["/crm_mon/resources//clone[@id='#{@id}']"]
+          if status
+            @unique = status.attributes['unique'] == 'true'
+            @managed = status.attributes['managed'] == 'true'
+            @failed = status.attributes['failed'] == 'true'
+            @failure_ignored = status.attributes['failure_ignored'] == 'true'
+          end
+        end
+      end
+    end
+
+    def update_status
+      if @member
+        @member.update_status
+        @status = @member.status
+      end
+      if disabled?
+        @status = ClusterEntity::ResourceStatus.new(:disabled)
+      end
+    end
+
+    def to_status(version='1')
+      if version == '2'
+        hash = super(version)
+        hash[:member] = @member.to_status(version)
+        return hash
+      else
+        return @member ? @member.to_status(version) : []
+      end
+    end
+
+    def get_map
+      map = super
+      map.update(@member.get_map)
+      return map
+    end
+  end
+
+
+  class Clone < MultiInstance
+
+    def initialize(
+      resource_cib_element=nil, crm_dom=nil, rsc_status=nil, parent=nil,
+      operations=nil
+    )
+      super(resource_cib_element, crm_dom, rsc_status, parent, operations)
+      @class_type = 'clone'
+    end
+
+    def to_status(version='1')
+      member = super(version)
+      if version == '2'
+        return member
+      else
+        meta_attr = []
+        unless @member.instance_of?(Group)
+          meta_attr = ClusterEntity::get_meta_attr_version1(self)
+        end
+        clone_id = @member.instance_of?(Group) ? @member.id : @id
+        member.each do |m|
+          m[:clone] = true
+          m[:clone_id] = clone_id
+          m[:meta_attr] = ClusterEntity::merge_meta_attr_version1(
+            m[:meta_attr],
+            meta_attr
+          )
+        end
+        return member
+      end
+    end
+  end
+
+
+  class MasterSlave < MultiInstance
+    attr_accessor :masters, :slaves, :masters_unknown
+
+    def initialize(master_cib_element=nil, crm_dom=nil, rsc_status=nil, parent=nil, operations=nil)
+      super(master_cib_element, crm_dom, rsc_status, parent, operations)
+      @masters_unknown = false
+      @class_type = 'master'
+      @masters = []
+      @slaves = []
+      update_status
+      if @member
+        if @member.instance_of?(Primitive)
+          primitive_list = [@member]
+        else
+          primitive_list = @member.members
+        end
+        @masters, @slaves = get_masters_slaves(primitive_list)
+        if (@masters.empty? and !@masters_unknown and
+          @status != ClusterEntity::ResourceStatus.new(:disabled)
+        )
+          @warning_list << {
+            :message => 'Resource is master/slave but has not been promoted '\
+              + 'to master on any node.',
+            :type => 'no_master'
+          }
+        end
+      end
+    end
+
+    def to_status(version='1')
+      member = super(version)
+      if version == '2'
+        return member
+      else
+        meta_attr = []
+        unless @member.instance_of?(Group)
+          meta_attr = ClusterEntity::get_meta_attr_version1(self)
+        end
+        ms_id = @member.instance_of?(Group) ? @member.id : @id
+        member.each do |m|
+          m[:ms] = true
+          m[:ms_id] = ms_id
+          m[:meta_attr] = ClusterEntity::merge_meta_attr_version1(
+            m[:meta_attr],
+            meta_attr
+          )
+        end
+        return member
+      end
+    end
+
+    def update_status
+      if @member
+        @member.update_status
+        @status = @member.status
+        if @member.instance_of?(Primitive)
+          primitive_list = [@member]
+        else
+          primitive_list = @member.members
+        end
+        @masters, @slaves = get_masters_slaves(primitive_list)
+        if (@masters.empty? and !@masters_unknown and
+          @member.status == ClusterEntity::ResourceStatus.new(:running)
+        )
+          @status = ClusterEntity::ResourceStatus.new(:partially_running)
+        end
+      end
+      if disabled?
+        @status = ClusterEntity::ResourceStatus.new(:disabled)
+      end
+    end
+
+    private
+    def get_masters_slaves(primitive_list)
+      masters = []
+      slaves = []
+      primitive_list.each { |primitive|
+        if primitive.instance_of?(ClusterEntity::Primitive)
+          primitive.crm_status.each { |stat|
+            if stat.role == 'Master'
+              if stat.node
+                masters << stat.node[:name]
+              end
+            else
+              if stat.node
+                slaves << stat.node[:name]
+              end
+            end
+          }
+        end
+      }
+      return [masters, slaves]
+    end
+  end
+
+
+  class ResourceOperation < JSONable
+    attr_accessor :call_id, :crm_debug_origin, :crm_feature_set, :exec_time,
+                  :exit_reason, :id, :interval, :last_rc_change, :last_run,
+                  :on_node, :op_digest, :operation, :operation_key,
+                  :op_force_restart, :op_restart_digest, :op_status,
+                  :queue_time, :rc_code, :transition_key, :transition_magic
+    def initialize(op_element)
+      @call_id = op_element.attributes['call-id'].to_i
+      @crm_debug_origin = op_element.attributes['crm-debug-origin']
+      @crm_feature_set = op_element.attributes['crm_feature_set']
+      @exec_time = op_element.attributes['exec-time'].to_i
+      @exit_reason = op_element.attributes['exit-reason']
+      @id = op_element.attributes['id']
+      @interval = op_element.attributes['interval'].to_i
+      @last_rc_change = op_element.attributes['last-rc-change'].to_i
+      @last_run = op_element.attributes['last-run'].to_i
+      @on_node = op_element.attributes['on_node']
+      @op_digest = op_element.attributes['op-digest']
+      @operation_key = op_element.attributes['operation_key']
+      @operation = op_element.attributes['operation']
+      @op_force_restart = op_element.attributes['op-force-restart']
+      @op_restart_digest = op_element.attributes['op-restart-digest']
+      @op_status = op_element.attributes['op-status'].to_i
+      @queue_time = op_element.attributes['queue-time'].to_i
+      @rc_code = op_element.attributes['rc-code'].to_i
+      @transition_key = op_element.attributes['transition-key']
+      @transition_magic = op_element.attributes['transition-magic']
+
+      unless @on_node
+        elem = op_element.parent
+        while elem
+          if elem.name == 'node_state'
+            @on_node = elem.attributes['uname']
+            break
+          end
+          elem = elem.parent
+        end
+      end
+    end
+  end
+
+
+  class Node < JSONable
+    attr_accessor :id, :error_list, :warning_list, :status, :quorum, :uptime,
+                  :name, :corosync, :pacemaker, :cman, :corosync_enabled,
+                  :pacemaker_enabled, :pcsd_enabled
+
+    def initialize
+      @id = nil
+      @error_list = []
+      @warning_list = []
+      @status = 'unknown'
+      @quorum = nil
+      @uptime = 'unknown'
+      @name = nil
+      @corosync = false
+      @pacemaker = false
+      @cman = false
+      @corosync_enabled = false
+      @pacemaker_enabled = false
+      @pcsd_enabled = false
+    end
+
+    def self.load_current_node(session, crm_dom=nil)
+      node = ClusterEntity::Node.new
+      node.corosync = corosync_running?
+      node.corosync_enabled = corosync_enabled?
+      node.pacemaker = pacemaker_running?
+      node.pacemaker_enabled = pacemaker_enabled?
+      node.cman = cman_running?
+      node.pcsd_enabled = pcsd_enabled?
+
+      node_online = (node.corosync and node.pacemaker)
+      node.status = node_online ? 'online' : 'offline'
+
+      node.uptime = get_node_uptime
+      node.id = get_local_node_id
+
+      if node_online and crm_dom
+        node_el = crm_dom.elements["//node[@id='#{node.id}']"]
+        if node_el and node_el.attributes['standby'] == 'true'
+          node.status = 'standby'
+        else
+          node.status = 'online'
+        end
+        node.quorum = !!crm_dom.elements['//current_dc[@with_quorum="true"]']
+      else
+        node.status = 'offline'
+      end
+
+      return node
+    end
+  end
+end
diff --git a/pcsd/config.rb b/pcsd/config.rb
index 8a9dd33..011c2bb 100644
--- a/pcsd/config.rb
+++ b/pcsd/config.rb
@@ -1,50 +1,116 @@
 require 'json'
-require 'pp'
+require 'orderedhash'
+
+require 'cluster.rb'
+require 'permissions.rb'
 
 class PCSConfig
-  attr_accessor :clusters
+  CURRENT_FORMAT = 2
+  attr_accessor :clusters, :permissions_local, :format_version, :data_version
 
-  def initialize
+  def initialize(cfg_text)
+    @format_version = 0
+    @data_version = 0
     @clusters = []
+    @permissions_local = Permissions::PermissionsSet.new([])
+
+    input_clusters = []
+    input_permissions = {}
+
     begin
-      json = File.read(SETTINGS_FILE)
-      input_clusters = JSON.parse(json)
-    rescue
-      input_clusters = []
+      json = JSON.parse(cfg_text)
+      if not(json.is_a?(Hash) and json.key?("format_version"))
+        @format_version = 1
+      else
+        @format_version = json["format_version"]
+      end
+
+      if @format_version > CURRENT_FORMAT
+        $logger.warn(
+          "pcs_settings file format version is #{@format_version}" +
+          ", newest fully supported version is #{CURRENT_FORMAT}"
+        )
+      end
+
+      if @format_version >= 2
+        @data_version = json["data_version"] || 0
+        input_clusters = json["clusters"] || []
+        input_permissions = json['permissions'] || {}
+      elsif @format_version == 1
+        input_clusters = json
+        # backward compatibility code start
+        # Old pcsd without permission support was using format_version == 1.
+        # All members of 'haclient' group had unrestricted access.
+        # We give them access to most functions except reading tokens and keys;
+        # because of that they also won't be able to add or remove nodes.
+        input_permissions = {
+          'local_cluster' => [
+            {
+              'type' => Permissions::TYPE_GROUP,
+              'name' => ADMIN_GROUP,
+              'allow' => [
+                Permissions::READ,
+                Permissions::WRITE,
+                Permissions::GRANT,
+              ]
+            },
+          ],
+        }
+        # backward compatibility code end
+      else
+        $logger.error("Unable to parse pcs_settings file")
+      end
+    rescue => e
+      $logger.error("Unable to parse pcs_settings file: #{e}")
     end
+
     input_clusters.each {|c|
       @clusters << Cluster.new(c["name"], c["nodes"])
     }
+
+    if input_permissions.key?('local_cluster')
+      perm_list = []
+      input_permissions['local_cluster'].each { |perm|
+        perm_list << Permissions::EntityPermissions.new(
+          perm['type'], perm['name'], perm['allow']
+        )
+      }
+      @permissions_local = Permissions::PermissionsSet.new(perm_list)
+    end
   end
 
-  def update(cluster_name, node_list)
+  def update_cluster(cluster_name, node_list)
     if node_list.length == 0
       @clusters.delete_if{|c|c.name == cluster_name}
-      $logger.info("Removing cluster: #{cluster_name}")
-      self.save
+      $logger.info("Removing cluster from pcs_settings: #{cluster_name}")
       return
     end
     @clusters.each {|c|
       if c.name == cluster_name
         c.nodes = node_list
-        self.save
         break
       end
     }
   end
 
-  def save
-    out_cluster_array = []
+  def text()
+    out_hash = OrderedHash.new
+    out_hash['format_version'] = CURRENT_FORMAT
+    out_hash['data_version'] = @data_version
+    out_hash['clusters'] = []
+    out_hash['permissions'] = OrderedHash.new
+    out_hash['permissions']['local_cluster'] = []
+
     @clusters.each { |c|
-      temphash = {}
-      temphash["name"] = c.name
-      temphash["nodes"] = c.nodes
-      out_cluster_array << temphash
+      c_hash = OrderedHash.new
+      c_hash['name'] = c.name
+      c_hash['nodes'] = c.nodes.uniq.sort
+      out_hash['clusters'] << c_hash
     }
 
-    File.open(SETTINGS_FILE, "w") do |f|
-	f.write(JSON.pretty_generate(out_cluster_array))
-    end
+    out_hash['permissions']['local_cluster'] = @permissions_local.to_hash()
+
+    return JSON.pretty_generate(out_hash)
   end
 
   def remove_cluster(cluster_name)
@@ -54,7 +120,7 @@ class PCSConfig
   def is_cluster_name_in_use(cname)
     @clusters.each {|c|
       if c.name == cname
-      	return true
+        return true
       end
     }
     return false
@@ -63,7 +129,7 @@ class PCSConfig
   def is_node_in_use(nodename)
     @clusters.each {|c|
       c.nodes.each {|n|
-      	return true if n == nodename
+        return true if n == nodename
       }
     }
     return false
@@ -77,4 +143,61 @@ class PCSConfig
     }
     return nil
   end
+
+  def cluster_nodes_equal?(cluster_name, nodes)
+    my_nodes = get_nodes(cluster_name) || []
+    nodes = nodes || []
+    return my_nodes.sort.uniq == nodes.sort.uniq
+  end
+end
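+# An illustrative pcs_settings file in format_version 2, roughly as produced
+# by PCSConfig#text() (cluster names, nodes and permission values are
+# examples only; the exact permission strings depend on the Permissions
+# module constants):
+#   {
+#     "format_version": 2,
+#     "data_version": 3,
+#     "clusters": [
+#       {"name": "cluster1", "nodes": ["node1", "node2"]}
+#     ],
+#     "permissions": {
+#       "local_cluster": [
+#         {"type": "group", "name": "haclient", "allow": ["read", "write"]}
+#       ]
+#     }
+#   }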
+
+
+class PCSTokens
+  CURRENT_FORMAT = 2
+  attr_accessor :tokens, :format_version, :data_version
+
+  def initialize(cfg_text)
+    @format_version = 0
+    @data_version = 0
+    @tokens = {}
+
+    begin
+      json = JSON.parse(cfg_text)
+      if not(json.is_a?(Hash) and json.key?('format_version') and json.key?('tokens'))
+        @format_version = 1
+      else
+        @format_version = json['format_version']
+      end
+
+      if @format_version > CURRENT_FORMAT
+        $logger.warn(
+          "tokens file format version is #{@format_version}" +
+          ", newest fully supported version is #{CURRENT_FORMAT}"
+        )
+      end
+
+      if @format_version >= 2
+        @data_version = json['data_version'] || 0
+        @tokens = json['tokens'] || {}
+      elsif @format_version == 1
+        @tokens = json
+      else
+        $logger.error('Unable to parse tokens file')
+      end
+    rescue => e
+      $logger.error("Unable to parse tokens file: #{e}")
+    end
+  end
+
+  def text()
+    tokens_hash = OrderedHash.new
+    @tokens.keys.sort.each { |key| tokens_hash[key] = @tokens[key] }
+
+    out_hash = OrderedHash.new
+    out_hash['format_version'] = CURRENT_FORMAT
+    out_hash['data_version'] = @data_version
+    out_hash['tokens'] = tokens_hash
+
+    return JSON.pretty_generate(out_hash)
+  end
 end
diff --git a/pcsd/corosyncconf.rb b/pcsd/corosyncconf.rb
new file mode 100644
index 0000000..6b9a835
--- /dev/null
+++ b/pcsd/corosyncconf.rb
@@ -0,0 +1,152 @@
+module CorosyncConf
+  class Section
+    attr_reader :parent, :name
+
+    def initialize(name)
+      @parent = nil
+      @attr_list = []
+      @section_list = []
+      @name = name
+    end
+
+    def text(indent='    ')
+      lines = []
+      @attr_list.each { |attrib|
+        lines << "#{attrib[0]}: #{attrib[1]}"
+      }
+      lines << '' if not(@attr_list.empty? or @section_list.empty?)
+      last_section = @section_list.length - 1
+      @section_list.each_with_index { |section, index|
+        lines += section.text.split("\n")
+        lines.pop if lines[-1].strip.empty?
+        lines << '' if index < last_section
+      }
+      if @parent
+        lines.map! { |item| item.empty? ? item : indent + item }
+        lines.unshift("#{@name} {")
+        lines << '}'
+      end
+      final = lines.join("\n")
+      final << "\n" if not final.empty?
+      return final
+    end
+
+    def root
+      parent = self
+      parent = parent.parent while parent.parent
+      return parent
+    end
+
+    def attributes(name=nil)
+      return @attr_list.find_all { |attrib| not name or attrib[0] == name }
+    end
+
+    def add_attribute(name, value)
+      @attr_list << [name, value]
+      return self
+    end
+
+    def del_attribute(attribute)
+      @attr_list.delete(attribute)
+      return self
+    end
+
+    def del_attributes_by_name(name, value=nil)
+      @attr_list.reject! { |attrib|
+        attrib[0] == name and (not value or attrib[1] == value)
+      }
+      return self
+    end
+
+    def set_attribute(name, value)
+      found = false
+      new_attr_list = []
+      @attr_list.each { |attrib|
+        if attrib[0] != name
+          new_attr_list << attrib
+        elsif not found
+          found = true
+          attrib[1] = value
+          new_attr_list << attrib
+        end
+      }
+      @attr_list = new_attr_list
+      self.add_attribute(name, value) if not found
+      return self
+    end
+
+    def sections(name=nil)
+      return @section_list.find_all { |section|
+        not name or section.name == name
+      }
+    end
+
+    def add_section(section)
+      parent = self
+      while parent
+        raise CircularParentshipException if parent == section
+        parent = parent.parent
+      end
+      section.parent.del_section(section) if section.parent
+      section.parent = self
+      @section_list << section
+      return self
+    end
+
+    def del_section(section)
+      if @section_list.delete(section)
+        # don't set parent to nil if the section was not found in the list
+        section.parent = nil
+      end
+      return self
+    end
+
+    protected
+
+    def parent=(parent)
+      @parent = parent
+      return self
+    end
+  end
+
+
+  def CorosyncConf::parse_string(conf_text)
+    root = Section.new('')
+    self.parse_section(conf_text.split("\n"), root)
+    return root
+  end
+
+  def CorosyncConf::parse_section(lines, section)
+    # parser is trying to work the same way as an original corosync parser
+    while not lines.empty?
+      current_line = lines.shift().strip()
+      next if current_line.empty? or current_line.start_with?('#')
+      if current_line.include?('{')
+        section_name = current_line.rpartition('{').first
+        new_section = Section.new(section_name.strip)
+        section.add_section(new_section)
+        self.parse_section(lines, new_section)
+      elsif current_line.include?('}')
+        if not section.parent
+          raise ParseErrorException, 'Unexpected closing brace'
+        end
+        return
+      elsif current_line.include?(':')
+        section.add_attribute(
+          *current_line.split(':', 2).map { |part| part.strip }
+        )
+      end
+    end
+    raise ParseErrorException, 'Missing closing brace' if section.parent
+  end
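+  # A minimal usage sketch (the configuration text is illustrative only):
+  #   conf = CorosyncConf::parse_string("totem {\n  version: 2\n}\n")
+  #   conf.sections('totem')[0].attributes('version')
+  #   # => [['version', '2']]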
+
+
+  class CorosyncConfException < Exception
+  end
+
+  class CircularParentshipException < CorosyncConfException
+  end
+
+  class ParseErrorException < CorosyncConfException
+  end
+end
diff --git a/pcsd/fenceagent.rb b/pcsd/fenceagent.rb
index 4cf77ee..b52ad6f 100644
--- a/pcsd/fenceagent.rb
+++ b/pcsd/fenceagent.rb
@@ -1,4 +1,4 @@
-def getFenceAgents(fence_agent = nil)
+def getFenceAgents(session, fence_agent = nil)
   fence_agent_list = {}
   agents = Dir.glob('/usr/sbin/fence_' + '*')
   agents.each { |a|
@@ -7,7 +7,7 @@ def getFenceAgents(fence_agent = nil)
     next if fa.name == "fence_ack_manual"
 
     if fence_agent and a.sub(/.*\//,"") == fence_agent.sub(/.*:/,"")
-      required_options, optional_options, advanced_options, info = getFenceAgentMetadata(fa.name)
+      required_options, optional_options, advanced_options, info = getFenceAgentMetadata(session, fa.name)
       fa.required_options = required_options
       fa.optional_options = optional_options
       fa.advanced_options = advanced_options
@@ -18,13 +18,42 @@ def getFenceAgents(fence_agent = nil)
   fence_agent_list
 end
 
-def getFenceAgentMetadata(fenceagentname)
+def getFenceAgentMetadata(session, fenceagentname)
+  options_required = {}
+  options_optional = {}
+  options_advanced = {
+      "priority" => "",
+      "pcmk_host_argument" => "",
+      "pcmk_host_map" => "",
+      "pcmk_host_list" => "",
+      "pcmk_host_check" => ""
+  }
+  for a in ["reboot", "list", "status", "monitor", "off"]
+    options_advanced["pcmk_" + a + "_action"] = ""
+    options_advanced["pcmk_" + a + "_timeout"] = ""
+    options_advanced["pcmk_" + a + "_retries"] = ""
+  end
+
   # There are bugs in stonith_admin & the new fence_agents interaction
   # eventually we'll want to switch back to this, but for now we directly
   # call the agent to get metadata
   #metadata = `stonith_admin --metadata -a #{fenceagentname}`
-  metadata = `/usr/sbin/#{fenceagentname} -o metadata`
-  doc = REXML::Document.new(metadata)
+  if not fenceagentname.start_with?('fence_') or fenceagentname.include?('/')
+    $logger.error "Invalid fence agent '#{fenceagentname}'"
+    return [options_required, options_optional, options_advanced]
+  end
+  stdout, stderr, retval = run_cmd(
+    session, "/usr/sbin/#{fenceagentname}", '-o', 'metadata'
+  )
+  metadata = stdout.join
+  begin
+    doc = REXML::Document.new(metadata)
+  rescue REXML::ParseException => e
+    $logger.error(
+      "Unable to parse metadata of fence agent '#{resourcepath}': #{e}"
+    )
+    return [options_required, options_optional, options_advanced]
+  end
 
   short_desc = ""
   long_desc = ""
@@ -40,21 +69,6 @@ def getFenceAgentMetadata(fenceagentname)
     long_desc = ld.text ? ld.text.strip : ld.text
   }
 
-  options_required = {}
-  options_optional = {}
-  options_advanced = {
-      "timeout" => "",
-      "priority" => "",
-      "pcmk_host_argument" => "",
-      "pcmk_host_map" => "",
-      "pcmk_host_list" => "",
-      "pcmk_host_check" => ""
-  }
-  for a in ["reboot", "list", "status", "monitor", "off"]
-    options_advanced["pcmk_" + a + "_action"] = ""
-    options_advanced["pcmk_" + a + "_timeout"] = ""
-    options_advanced["pcmk_" + a + "_retries"] = ""
-  end
   doc.elements.each('resource-agent/parameters/parameter') { |param|
     temp_array = []
     if param.elements["shortdesc"]
diff --git a/pcsd/pcs.rb b/pcsd/pcs.rb
index 8e1dcb0..2f58502 100644
--- a/pcsd/pcs.rb
+++ b/pcsd/pcs.rb
@@ -5,15 +5,25 @@ require 'shellwords'
 require 'cgi'
 require 'net/http'
 require 'net/https'
-
-def getAllSettings()
-  stdout, stderr, retval = run_cmd(PCS, "property")
-  stdout.map(&:chomp!)
-  stdout.map(&:strip!)
-  stdout2, stderr2, retval2 = run_cmd(PENGINE, "metadata")
+require 'json'
+require 'fileutils'
+require 'backports'
+
+require 'config.rb'
+require 'cfgsync.rb'
+require 'corosyncconf.rb'
+require 'resource.rb'
+require 'cluster_entity.rb'
+require 'auth.rb'
+
+def getAllSettings(session, cib_dom=nil)
+  unless cib_dom
+    cib_dom = get_cib_dom(session)
+  end
+  stdout2, stderr2, retval2 = run_cmd(session, PENGINE, "metadata")
   metadata = stdout2.join
   ret = {}
-  if retval == 0 and retval2 == 0
+  if cib_dom and retval2 == 0
     doc = REXML::Document.new(metadata)
 
     default = ""
@@ -22,24 +32,25 @@ def getAllSettings()
       name = e.attributes["name"]
       name.gsub!(/-/,"_")
       e.elements.each("content") { |c|
-	default = c.attributes["default"]
-	el_type = c.attributes["type"]
+        default = c.attributes["default"]
+        el_type = c.attributes["type"]
       }
       ret[name] = {"value" => default, "type" => el_type}
     }
 
-    stdout.each {|line|
-      key,val = line.split(': ', 2)
+    cib_dom.elements.each('/cib/configuration/crm_config//nvpair') { |e|
+      key = e.attributes['name']
+      val = e.attributes['value']
       key.gsub!(/-/,"_")
       if ret.has_key?(key)
-	if ret[key]["type"] == "boolean"
-	  val == "true" ?  ret[key]["value"] = true : ret[key]["value"] = false
-	else
-	  ret[key]["value"] = val
-	end
+        if ret[key]["type"] == "boolean"
+          val == "true" ?  ret[key]["value"] = true : ret[key]["value"] = false
+        else
+          ret[key]["value"] = val
+        end
 
       else
-	ret[key] = {"value" => val, "type" => "unknown"}
+        ret[key] = {"value" => val, "type" => "unknown"}
       end
     }
     return ret
@@ -47,27 +58,37 @@ def getAllSettings()
   return {"error" => "Unable to get configuration settings"}
 end
 
-def add_fence_level (level, devices, node, remove = false)
+def add_fence_level(session, level, devices, node, remove = false)
   if not remove
-    stdout, stderr, retval = run_cmd(PCS, "stonith", "level", "add", level, node, devices)
-    return retval
+    stdout, stderr, retval = run_cmd(
+      session, PCS, "stonith", "level", "add", level, node, devices
+    )
+    return retval, stdout, stderr
   else
-    stdout, stderr, retval = run_cmd(PCS, "stonith", "level", "remove", level, node, devices)
-    return retval
+    stdout, stderr, retval = run_cmd(
+      session, PCS, "stonith", "level", "remove", level, node, devices
+    )
+    return retval, stdout, stderr
   end
 end
 
-def add_node_attr(node, key, value)
-  stdout, stderr, retval = run_cmd(PCS, "property", "set", "--node", node, key.to_s + '=' + value.to_s)
+def add_node_attr(session, node, key, value)
+  stdout, stderr, retval = run_cmd(
+    session, PCS, "property", "set", "--node", node, key.to_s + '=' + value.to_s
+  )
   return retval
 end
 
-def add_meta_attr(resource, key, value)
-  stdout, stderr, retval = run_cmd(PCS,"resource","meta",resource,key.to_s + "=" + value.to_s)
+def add_meta_attr(session, resource, key, value)
+  stdout, stderr, retval = run_cmd(
+    session, PCS, "resource", "meta", resource, key.to_s + "=" + value.to_s
+  )
   return retval
 end
 
-def add_location_constraint(resource, node, score)
+def add_location_constraint(
+  session, resource, node, score, force=false, autocorrect=true
+)
   if node == ""
     return "Bad node"
   end
@@ -75,18 +96,21 @@ def add_location_constraint(resource, node, score)
   if score == ""
     nodescore = node
   else
-    nodescore = node +"="+score
+    nodescore = node + "=" + score
   end
 
-  stdout, stderr, retval = run_cmd(
-    PCS, "constraint", "location", resource, "prefers", nodescore,
-    "--autocorrect"
-  )
+  cmd = [PCS, "constraint", "location", resource, "prefers", nodescore]
+  cmd << '--force' if force
+  cmd << '--autocorrect' if autocorrect
+
+  stdout, stderr, retval = run_cmd(session, *cmd)
   return retval, stderr.join(' ')
 end
 
-def add_location_constraint_rule(resource, rule, score, force=false)
-  cmd = [PCS, "constraint", "location", "--autocorrect", resource, "rule"]
+def add_location_constraint_rule(
+  session, resource, rule, score, force=false, autocorrect=true
+)
+  cmd = [PCS, "constraint", "location", resource, "rule"]
   if score != ''
     if is_score(score.upcase)
       cmd << "score=#{score.upcase}"
@@ -96,12 +120,14 @@ def add_location_constraint_rule(resource, rule, score, force=false)
   end
   cmd.concat(rule.shellsplit())
   cmd << '--force' if force
-  stdout, stderr, retval = run_cmd(*cmd)
+  cmd << '--autocorrect' if autocorrect
+  stdout, stderr, retval = run_cmd(session, *cmd)
   return retval, stderr.join(' ')
 end
 
 def add_order_constraint(
-    resourceA, resourceB, actionA, actionB, score, symmetrical=true, force=false
+    session, resourceA, resourceB, actionA, actionB, score, symmetrical=true,
+    force=false, autocorrect=true
 )
   sym = symmetrical ? "symmetrical" : "nonsymmetrical"
   if score != ""
@@ -109,78 +135,90 @@ def add_order_constraint(
   end
   command = [
     PCS, "constraint", "order", actionA, resourceA, "then", actionB, resourceB,
-    score, sym, "--autocorrect"
+    score, sym
   ]
   command << '--force' if force
-  stdout, stderr, retval = run_cmd(*command)
+  command << '--autocorrect' if autocorrect
+  stdout, stderr, retval = run_cmd(session, *command)
   return retval, stderr.join(' ')
 end
 
-def add_order_set_constraint(resource_set_list, force=false)
-  command = [PCS, "constraint", "order", "--autocorrect"]
+def add_order_set_constraint(
+  session, resource_set_list, force=false, autocorrect=true
+)
+  command = [PCS, "constraint", "order"]
   resource_set_list.each { |resource_set|
     command << "set"
     command.concat(resource_set)
   }
   command << '--force' if force
-  stdout, stderr, retval = run_cmd(*command)
+  command << '--autocorrect' if autocorrect
+  stdout, stderr, retval = run_cmd(session, *command)
   return retval, stderr.join(' ')
 end
 
-def add_colocation_constraint(resourceA, resourceB, score, force=false)
+def add_colocation_constraint(
+  session, resourceA, resourceB, score, force=false, autocorrect=true
+)
   if score == "" or score == nil
     score = "INFINITY"
   end
   command = [
-    PCS, "constraint", "colocation", "add", resourceA, resourceB, score,
-    "--autocorrect"
+    PCS, "constraint", "colocation", "add", resourceA, resourceB, score
   ]
   command << '--force' if force
-  stdout, stderr, retval = run_cmd(*command)
+  command << '--autocorrect' if autocorrect
+  stdout, stderr, retval = run_cmd(session, *command)
   return retval, stderr.join(' ')
 end
 
-def remove_constraint(constraint_id)
-  stdout, stderror, retval = run_cmd(PCS, "constraint", "remove", constraint_id)
+def remove_constraint(session, constraint_id)
+  stdout, stderror, retval = run_cmd(
+    session, PCS, "constraint", "remove", constraint_id
+  )
   $logger.info stdout
   return retval
 end
 
-def remove_constraint_rule(rule_id)
+def remove_constraint_rule(session, rule_id)
   stdout, stderror, retval = run_cmd(
-    PCS, "constraint", "rule", "remove", rule_id
+    session, PCS, "constraint", "rule", "remove", rule_id
   )
   $logger.info stdout
   return retval
 end
 
-def add_acl_role(name, description)
+def add_acl_role(session, name, description)
   cmd = [PCS, "acl", "role", "create", name.to_s]
   if description.to_s != ""
     cmd << "description=#{description.to_s}"
   end
-  stdout, stderror, retval = run_cmd(*cmd)
+  stdout, stderror, retval = run_cmd(session, *cmd)
   if retval != 0
     return stderror.join("\n").strip
   end
   return ""
 end
 
-def add_acl_permission(acl_role_id, perm_type, xpath_id, query_id)
+def add_acl_permission(session, acl_role_id, perm_type, xpath_id, query_id)
   stdout, stderror, retval = run_cmd(
-    PCS, "acl", "permission", "add", acl_role_id.to_s, perm_type.to_s,
+    session, PCS, "acl", "permission", "add", acl_role_id.to_s, perm_type.to_s,
     xpath_id.to_s, query_id.to_s
   )
   if retval != 0
-    return stderror.join("\n").strip
+    if stderror.empty?
+      return "Error adding permission"
+    else
+      return stderror.join("\n").strip
+    end
   end
   return ""
 end
 
-def add_acl_usergroup(acl_role_id, user_group, name)
+def add_acl_usergroup(session, acl_role_id, user_group, name)
   if (user_group == "user") or (user_group == "group")
     stdout, stderr, retval = run_cmd(
-      PCS, "acl", user_group, "create", name.to_s, acl_role_id.to_s
+      session, PCS, "acl", user_group, "create", name.to_s, acl_role_id.to_s
     )
     if retval == 0
       return ""
@@ -190,46 +228,50 @@ def add_acl_usergroup(acl_role_id, user_group, name)
     end
   end
   stdout, stderror, retval = run_cmd(
-    PCS, "acl", "role", "assign", acl_role_id.to_s, name.to_s
+    session, PCS, "acl", "role", "assign", acl_role_id.to_s, name.to_s
   )
   if retval != 0
-    return stderror.join("\n").strip
+    if stderror.empty?
+      return "Error adding #{user_group}"
+    else
+      return stderror.join("\n").strip
+    end
   end
   return ""
 end
 
-def remove_acl_permission(acl_perm_id)
-  stdout, stderror, retval = run_cmd(PCS, "acl", "permission", "delete", acl_perm_id.to_s)
+def remove_acl_permission(session, acl_perm_id)
+  stdout, stderror, retval = run_cmd(
+    session, PCS, "acl", "permission", "delete", acl_perm_id.to_s
+  )
   if retval != 0
-    return stderror.join("\n").chomp
+    if stderror.empty?
+      return "Error removing permission"
+    else
+      return stderror.join("\n").strip
+    end
   end
   return ""
 end
 
-def remove_acl_usergroup(role_id, usergroup_id)
+def remove_acl_usergroup(session, role_id, usergroup_id)
   stdout, stderror, retval = run_cmd(
-    PCS, "acl", "role", "unassign", role_id.to_s, usergroup_id.to_s,
+    session, PCS, "acl", "role", "unassign", role_id.to_s, usergroup_id.to_s,
     "--autodelete"
   )
   if retval != 0
-    return stderror.join("\n").chomp
+    if stderror.empty?
+      return "Error removing user / group"
+    else
+      return stderror.join("\n").strip
+    end
   end
   return ""
 end
 
-def get_node_token(node)
-  out, stderror, retval = run_cmd(PCS, "cluster", "token", node)
-  return retval, out
-end
-
-def get_token_node_list()
-  out, stderror, retval = run_cmd(PCS, "cluster", "token-nodes")
-  return retval, out
-end
-
 # Gets all of the nodes specified in the pcs config file for the cluster
 def get_cluster_nodes(cluster_name)
-  pcs_config = PCSConfig.new
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
   clusters = pcs_config.clusters
   cluster = nil
   for c in clusters
@@ -248,15 +290,21 @@ def get_cluster_nodes(cluster_name)
   return nodes
 end
 
-def send_cluster_request_with_token(cluster_name, request, post=false, data={}, remote=true, raw_data=nil)
+def send_cluster_request_with_token(session, cluster_name, request, post=false, data={}, remote=true, raw_data=nil)
+  $logger.info("SCRWT: " + request)
+  nodes = get_cluster_nodes(cluster_name)
+  return send_nodes_request_with_token(
+    session, nodes, request, post, data, remote, raw_data
+  )
+end
+
+def send_nodes_request_with_token(session, nodes, request, post=false, data={}, remote=true, raw_data=nil)
   out = ""
   code = 0
-  nodes = get_cluster_nodes(cluster_name)
+  $logger.info("SNRWT: " + request)
 
   # If we're removing nodes, we don't send this to one of the nodes we're
   # removing, unless we're removing all nodes
-
-  $logger.info("SCRWT: " + request)
   if request == "/remove_nodes"
     new_nodes = nodes.dup
     data.each {|k,v|
@@ -268,33 +316,77 @@ def send_cluster_request_with_token(cluster_name, request, post=false, data={},
       nodes = new_nodes
     end
   end
+
   for node in nodes
-    code, out = send_request_with_token(node,request, post, data, remote, raw_data)
-    $logger.info "Node: #{node} Request: #{request}"
-    if out != '{"noresponse":true}' and out != '{"pacemaker_not_running":true}'
-      break
+    $logger.info "SNRWT Node: #{node} Request: #{request}"
+    code, out = send_request_with_token(
+      session, node, request, post, data, remote, raw_data
+    )
+    # try next node if:
+    # - current node does not support the request (old version of pcsd?) (404)
+    # - an exception or other error occurred (5xx)
+    # - we don't have a token for the node (401, notoken)
+    # - we didn't get a response from the node (e.g. an exception occurred)
+    # - pacemaker is not running on the node
+    # do not try next node if:
+    # - node returned 400 - it means the request cannot be processed because of
+    #   invalid arguments or another known issue, no node would be able to
+    #   process the request (e.g. removing a non-existing resource)
+    # - node returned 403 - permission denied, no node should allow to process
+    #   the request
+    log = "SNRWT Node #{node} Request #{request}"
+    if (404 == code) or (code >= 500 and code <= 599)
+      $logger.info("#{log}: HTTP code #{code}")
+      next
+    end
+    if (401 == code) or ('{"notoken":true}' == out)
+      $logger.info("#{log}: Bad or missing token")
+      next
+    end
+    if '{"pacemaker_not_running":true}' == out
+      $logger.info("#{log}: Pacemaker not running")
+      next
     end
-    $logger.info "No response: Node: #{node} Request: #{request}"
+    if '{"noresponse":true}' == out
+      $logger.info("#{log}: No response")
+      next
+    end
+    $logger.info("#{log}: HTTP code #{code}")
+    break
   end
-  return code,out
+  return code, out
 end
 
-def send_request_with_token(node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30)
-  start = Time.now
-  begin
-    retval, token = get_node_token(node)
-    if retval != 0
-      return 400,'{"notoken":true}'
-    end
-
-    token = token[0].strip
+def send_request_with_token(session, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, additional_tokens={})
+  token = additional_tokens[node] || get_node_token(node)
+  $logger.info "SRWT Node: #{node} Request: #{request}"
+  if not token
+    $logger.error "Unable to connect to node #{node}, no token available"
+    return 400,'{"notoken":true}'
+  end
+  cookies_data = {
+    'token' => token,
+  }
+  return send_request(
+    session, node, request, post, data, remote, raw_data, timeout, cookies_data
+  )
+end
 
+def send_request(session, node, request, post=false, data={}, remote=true, raw_data=nil, timeout=30, cookies_data=nil)
+  cookies_data = {} if not cookies_data
+  begin
     request = "/#{request}" if not request.start_with?("/")
 
+    # fix ipv6 address for URI.parse
+    node6 = node
+    if (node.include?(":") and ! node.start_with?("["))
+      node6 = "[#{node}]"
+    end
+
     if remote
-      uri = URI.parse("https://#{node}:2224/remote" + request)
+      uri = URI.parse("https://#{node6}:2224/remote" + request)
     else
-      uri = URI.parse("https://#{node}:2224" + request)
+      uri = URI.parse("https://#{node6}:2224" + request)
     end
 
     if post
@@ -304,10 +396,31 @@ def send_request_with_token(node, request, post=false, data={}, remote=true, raw
       req = Net::HTTP::Get.new(uri.path)
       req.set_form_data(data)
     end
-    cookies_to_send = [CGI::Cookie.new("name" => 'token', "value" => token).to_s]
-    cookies_to_send << CGI::Cookie.new("name" =>  "CIB_user", "value" => $session[:username].to_s).to_s
-    req.add_field("Cookie",cookies_to_send.join(";"))
-    myhttp = Net::HTTP.new(uri.host, uri.port)
+
+    cookies_to_send = []
+    cookies_data_default = {}
+    # Let's be safe about characters in cookie variables and do base64.
+    # We cannot do it for CIB_user, however, in order to stay backward
+    # compatible, so we at least remove disallowed characters.
+    cookies_data_default['CIB_user'] = PCSAuth.cookieUserSafe(
+      session[:username].to_s
+    )
+    cookies_data_default['CIB_user_groups'] = PCSAuth.cookieUserEncode(
+      (session[:usergroups] || []).join(' ')
+    )
+
+    cookies_data_default.update(cookies_data)
+    cookies_data_default.each { |name, value|
+      cookies_to_send << CGI::Cookie.new('name' => name, 'value' => value).to_s
+    }
+    req.add_field('Cookie', cookies_to_send.join(';'))
+
+    # uri.host returns "[addr]" for ipv6 addresses, which is wrong
+    # uri.hostname returns "addr" for ipv6 addresses, which is correct, but it
+    #   is not available in older ruby versions
+    # There is a bug in Net::HTTP.new in some versions of ruby which prevents
+    # ipv6 addresses from being used here at all.
+    myhttp = Net::HTTP.new(node, uri.port)
     myhttp.use_ssl = true
     myhttp.verify_mode = OpenSSL::SSL::VERIFY_NONE
     res = myhttp.start do |http|
@@ -316,49 +429,112 @@ def send_request_with_token(node, request, post=false, data={}, remote=true, raw
     end
     return res.code.to_i, res.body
   rescue Exception => e
-    $logger.info "No response from: #{node} request: #{request}"
+    $logger.info "No response from: #{node} request: #{request}, exception: #{e}"
     return 400,'{"noresponse":true}'
   end
 end
 
-def add_node(new_nodename,all = false, auto_start=true)
+def add_node(session, new_nodename, all=false, auto_start=true)
   if all
+    command = [PCS, "cluster", "node", "add", new_nodename]
     if auto_start
-      out, stderror, retval = run_cmd(PCS, "cluster", "node", "add", new_nodename, "--start", "--enable")
-    else
-      out, stderror, retval = run_cmd(PCS, "cluster", "node", "add", new_nodename)
+      command << '--start'
+      command << '--enable'
     end
+    out, stderror, retval = run_cmd(session, *command)
   else
-    out, stderror, retval = run_cmd(PCS, "cluster", "localnode", "add", new_nodename)
+    out, stderror, retval = run_cmd(
+      session, PCS, "cluster", "localnode", "add", new_nodename
+    )
   end
-  $logger.info("Adding #{new_nodename} from pcs_settings.conf")
-  pcs_config = PCSConfig.new
-  pcs_config.update($cluster_name,get_corosync_nodes())
+  $logger.info("Adding #{new_nodename} to pcs_settings.conf")
+  corosync_nodes = get_corosync_nodes()
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  pcs_config.update_cluster($cluster_name, corosync_nodes)
+  sync_config = Cfgsync::PcsdSettings.from_text(pcs_config.text())
+  # on version conflict just go on; the config will eventually be corrected
+  # when the cluster is displayed in the web UI
+  Cfgsync::save_sync_new_version(
+    sync_config, corosync_nodes, $cluster_name, true
+  )
   return retval, out.join("\n") + stderror.join("\n")
 end
 
-def remove_node(new_nodename, all = false)
+def remove_node(session, new_nodename, all=false)
   if all
-    out, stderror, retval = run_cmd(PCS, "cluster", "node", "remove", new_nodename)
+    # we check for a quorum loss warning in remote_remove_nodes
+    out, stderror, retval = run_cmd(
+      session, PCS, "cluster", "node", "remove", new_nodename, "--force"
+    )
   else
-    out, stderror, retval = run_cmd(PCS, "cluster", "localnode", "remove", new_nodename)
+    out, stderror, retval = run_cmd(
+      session, PCS, "cluster", "localnode", "remove", new_nodename
+    )
   end
   $logger.info("Removing #{new_nodename} from pcs_settings.conf")
-  pcs_config = PCSConfig.new
-  pcs_config.update($cluster_name,get_corosync_nodes())
+  corosync_nodes = get_corosync_nodes()
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  pcs_config.update_cluster($cluster_name, corosync_nodes)
+  sync_config = Cfgsync::PcsdSettings.from_text(pcs_config.text())
+  # on version conflict just go on; the config will eventually be corrected
+  # when the cluster is displayed in the web UI
+  Cfgsync::save_sync_new_version(
+    sync_config, corosync_nodes, $cluster_name, true
+  )
   return retval, out + stderror
 end
 
 def get_current_node_name()
-  stdout, stderror, retval = run_cmd(CRM_NODE, "-n")
+  stdout, stderror, retval = run_cmd(
+    PCSAuth.getSuperuserSession, CRM_NODE, "-n"
+  )
   if retval == 0 and stdout.length > 0
     return stdout[0].chomp()
   end
   return ""
 end
 
+def get_local_node_id()
+  if ISRHEL6
+    out, errout, retval = run_cmd(
+      PCSAuth.getSuperuserSession, COROSYNC_CMAPCTL, "cluster.cman"
+    )
+    if retval != 0
+      return ""
+    end
+    match = /cluster\.nodename=(.*)/.match(out.join("\n"))
+    if not match
+      return ""
+    end
+    local_node_name = match[1]
+    out, errout, retval = run_cmd(
+      PCSAuth.getSuperuserSession,
+      CMAN_TOOL, "nodes", "-F", "id", "-n", local_node_name
+    )
+    if retval != 0
+      return ""
+    end
+    return out[0].strip()
+  end
+  out, errout, retval = run_cmd(
+    PCSAuth.getSuperuserSession,
+    COROSYNC_CMAPCTL, "-g", "runtime.votequorum.this_node_id"
+  )
+  if retval != 0
+    return ""
+  else
+    return out[0].split(/ = /)[1].strip()
+  end
+end
+
+def get_corosync_conf()
+  return Cfgsync::cluster_cfg_class.from_file().text()
+end
+
 def get_corosync_nodes()
-  stdout, stderror, retval = run_cmd(PCS, "status", "nodes", "corosync")
+  stdout, stderror, retval = run_cmd(
+    PCSAuth.getSuperuserSession, PCS, "status", "nodes", "corosync"
+  )
   if retval != 0
     return []
   end
@@ -371,54 +547,143 @@ def get_corosync_nodes()
   return corosync_nodes
 end
 
-# Get pacemaker nodes, but if they are not present fall back to corosync
 def get_nodes()
-  stdout, stderr, retval = run_cmd(PCS, "status", "nodes")
-  if retval != 0
-    stdout, stderr, retval = run_cmd(PCS, "status", "nodes", "corosync")
-  end
+  nodes = get_nodes_status()
+  return [
+    (nodes["corosync_online"] + nodes["pacemaker_online"]).uniq,
+    (nodes["corosync_offline"] + nodes["pacemaker_offline"] + nodes["pacemaker_standby"]).uniq
+  ]
+end
 
-  online = stdout[1]
-  offline = stdout[2]
+def get_nodes_status()
+  corosync_online = []
+  corosync_offline = []
+  pacemaker_online = []
+  pacemaker_offline = []
+  pacemaker_standby = []
+  in_pacemaker = false
+  stdout, stderr, retval = run_cmd(
+    PCSAuth.getSuperuserSession, PCS, "status", "nodes", "both"
+  )
+  stdout.each {|l|
+    l = l.chomp
+    if l.start_with?("Pacemaker Nodes:")
+      in_pacemaker = true
+    end
+    if l.start_with?("Pacemaker Remote Nodes:")
+      break
+    end
+    if l.end_with?(":")
+      next
+    end
 
-  if online
-    online = online.split(' ')[1..-1].sort
-  else
-    online = []
-  end
+    title,nodes = l.split(/: /,2)
+    if nodes == nil
+      next
+    end
 
-  if offline
-    offline = offline.split(' ')[1..-1].sort
+    if title == " Online"
+      in_pacemaker ? pacemaker_online.concat(nodes.split(/ /)) : corosync_online.concat(nodes.split(/ /))
+    elsif title == " Standby"
+      if in_pacemaker
+        pacemaker_standby.concat(nodes.split(/ /))
+      end
+    elsif title == " Maintenance"
+      if in_pacemaker
+        pacemaker_online.concat(nodes.split(/ /))
+      end
+    else
+      in_pacemaker ? pacemaker_offline.concat(nodes.split(/ /)) : corosync_offline.concat(nodes.split(/ /))
+    end
+  }
+  return {
+    'corosync_online' => corosync_online,
+    'corosync_offline' => corosync_offline,
+    'pacemaker_online' => pacemaker_online,
+    'pacemaker_offline' => pacemaker_offline,
+    'pacemaker_standby' => pacemaker_standby,
+  }
+end
+
+def need_ring1_address?()
+  out, errout, retval = run_cmd(PCSAuth.getSuperuserSession, COROSYNC_CMAPCTL)
+  if retval != 0
+    return false
   else
-    offline = []
+    udpu_transport = false
+    rrp = false
+    out.each { |line|
+      # support both corosync-objctl and corosync-cmapctl format
+      if /^\s*totem\.transport(\s+.*)?=\s*udpu$/.match(line)
+        udpu_transport = true
+      elsif /^\s*totem\.rrp_mode(\s+.*)?=\s*(passive|active)$/.match(line)
+        rrp = true
+      end
+    }
+    # On RHEL 6 a ring1 address is required regardless of transport:
+    # it has to be present in cluster.conf so that cman can set up
+    # ring1 in corosync.
+    return ((ISRHEL6 and rrp) or (rrp and udpu_transport))
   end
+end
 
-  [online, offline]
+def is_cman_with_udpu_transport?
+  if not ISRHEL6
+    return false
+  end
+  begin
+    cluster_conf = Cfgsync::ClusterConf.from_file().text()
+    conf_dom = REXML::Document.new(cluster_conf)
+    conf_dom.elements.each("cluster/cman") { |elem|
+      if elem.attributes["transport"].downcase == "udpu"
+        return true
+      end
+    }
+  rescue
+    return false
+  end
+  return false
 end
 
-def get_resource_agents_avail()
-  code, result = send_cluster_request_with_token(params[:cluster], 'get_avail_resource_agents')
-  ra = JSON.parse(result)
-  if (ra["noresponse"] == true) or (ra["notauthorized"] == "true") or (ra["notoken"] == true) or (ra["pacemaker_not_running"] == true)
+def get_resource_agents_avail(session)
+  code, result = send_cluster_request_with_token(
+    session, params[:cluster], 'get_avail_resource_agents'
+  )
+  return {} if 200 != code
+  begin
+    ra = JSON.parse(result)
+    if (ra["noresponse"] == true) or (ra["notauthorized"] == "true") or (ra["notoken"] == true) or (ra["pacemaker_not_running"] == true)
+      return {}
+    else
+      return ra
+    end
+  rescue JSON::ParserError
     return {}
-  else
-    return ra
   end
 end
 
-def get_stonith_agents_avail()
-  code, result = send_cluster_request_with_token(params[:cluster], 'get_avail_fence_agents')
-  sa = JSON.parse(result)
-  if (sa["noresponse"] == true) or (sa["notauthorized"] == "true") or (sa["notoken"] == true) or (sa["pacemaker_not_running"] == true)
+def get_stonith_agents_avail(session)
+  code, result = send_cluster_request_with_token(
+    session, params[:cluster], 'get_avail_fence_agents'
+  )
+  return {} if 200 != code
+  begin
+    sa = JSON.parse(result)
+    if (sa["noresponse"] == true) or (sa["notauthorized"] == "true") or (sa["notoken"] == true) or (sa["pacemaker_not_running"] == true)
+      return {}
+    else
+      return sa
+    end
+  rescue JSON::ParserError
     return {}
-  else
-    return sa
   end
 end
 
-def get_cluster_version()
+def get_cluster_name()
   if ISRHEL6
-    stdout, stderror, retval = run_cmd(COROSYNC_CMAPCTL, "cluster")
+    stdout, stderror, retval = run_cmd(
+      PCSAuth.getSuperuserSession, COROSYNC_CMAPCTL, "cluster"
+    )
     if retval == 0
       stdout.each { |line|
         match = /^cluster\.name=(.*)$/.match(line)
@@ -426,7 +691,7 @@ def get_cluster_version()
       }
     end
     begin
-      cluster_conf = File.open('/etc/cluster/cluster.conf').read
+      cluster_conf = Cfgsync::ClusterConf.from_file().text()
     rescue
       return ''
     end
@@ -437,148 +702,147 @@ def get_cluster_version()
     return ''
   end
 
-  stdout, stderror, retval = run_cmd(COROSYNC_CMAPCTL,"totem.cluster_name")
+  stdout, stderror, retval = run_cmd(
+    PCSAuth.getSuperuserSession, COROSYNC_CMAPCTL, "totem.cluster_name"
+  )
   if retval != 0 and not ISRHEL6
     # Cluster probably isn't running, try to get cluster name from
     # corosync.conf
     begin
-      corosync_conf = File.open("/etc/corosync/corosync.conf").read
+      corosync_conf = CorosyncConf::parse_string(
+        Cfgsync::CorosyncConf.from_file().text()
+      )
+      # mimic corosync behavior - the last cluster_name found is used
+      cluster_name = nil
+      corosync_conf.sections('totem').each { |totem|
+        totem.attributes('cluster_name').each { |attrib|
+          cluster_name = attrib[1]
+        }
+      }
+      return cluster_name if cluster_name
     rescue
-      return ""
-    end
-    in_totem = false
-    current_level = 0
-    corosync_conf.each_line do |line|
-      if line =~ /totem\s*\{/
-        in_totem = true
-      end
-      if in_totem
-        md = /cluster_name:\s*(\w+)/.match(line)
-        if md
-          return md[1]
-        end
-      end
-      if in_totem and line =~ /\}/
-        in_totem = false
-      end
+      return ''
     end
-
     return ""
   else
     return stdout.join().gsub(/.*= /,"").strip
   end
 end
 
-def get_node_attributes()
-  stdout, stderr, retval = run_cmd(PCS, "property", "list")
-  if retval != 0
-    return {}
+def get_node_attributes(session, cib_dom=nil)
+  unless cib_dom
+    cib_dom = get_cib_dom(session)
+    return {} unless cib_dom
   end
+  node_attrs = {}
+  cib_dom.elements.each(
+    '/cib/configuration/nodes/node/instance_attributes/nvpair'
+  ) { |e|
+    node = e.parent.parent.attributes['uname']
+    node_attrs[node] ||= []
+    node_attrs[node] << {
+      :id => e.attributes['id'],
+      :key => e.attributes['name'],
+      :value => e.attributes['value']
+    }
+  }
+  node_attrs.each { |_, val| val.sort_by! { |obj| obj[:key] }}
+  return node_attrs
+end
 
-  attrs = {}
-  found = false
-  stdout.each { |line|
-    if not found
-      if line.strip.start_with?("Node Attributes:")
-        found = true
-      end
-      next
-    end
-    if not line.start_with?(" ")
-      break
-    end
-    sline = line.split(":", 2)
-    nodename = sline[0].strip
-    attrs[nodename] = []
-    sline[1].strip.split(" ").each { |attr|
-      key, val = attr.split("=", 2)
-      attrs[nodename] << {:key => key, :value => val}
+def get_nodes_utilization(cib_dom)
+  return {} unless cib_dom
+  utilization = {}
+  cib_dom.elements.each(
+    '/cib/configuration/nodes/node/utilization/nvpair'
+  ) { |e|
+    node = e.parent.parent.attributes['uname']
+    utilization[node] ||= []
+    utilization[node] << {
+      :id => e.attributes['id'],
+      :name => e.attributes['name'],
+      :value => e.attributes['value']
     }
   }
-  return attrs
+  return utilization
 end
 
-def get_fence_levels()
-  stdout, stderr, retval = run_cmd(PCS, "stonith", "level")
-  if retval != 0 or stdout == ""
-    return {}
+def get_fence_levels(session, cib_dom=nil)
+  unless cib_dom
+    cib_dom = get_cib_dom(session)
+    return {} unless cib_dom
   end
 
   fence_levels = {}
-  node = ""
-  stdout.each {|line|
-    if line.start_with?(" Node: ")
-      node = line.split(":",2)[1].strip
-      next
-    end
-    fence_levels[node] ||= []
-    md = / Level (\S+) - (.*)$/.match(line)
-    fence_levels[node] << {"level" => md[1], "devices" => md[2]}
+  cib_dom.elements.each(
+    '/cib/configuration/fencing-topology/fencing-level'
+  ) { |e|
+    target = e.attributes['target']
+    fence_levels[target] ||= []
+    fence_levels[target] << {
+      'level' => e.attributes['index'],
+      'devices' => e.attributes['devices']
+    }
   }
+
+  fence_levels.each { |_, val| val.sort_by! { |obj| obj['level'].to_i }}
   return fence_levels
 end
 
-def get_acls()
-  stdout, stderr, retval = run_cmd(PCS, "acl", "show")
-  if retval != 0 or stdout == ""
-    return []
+def get_acls(session, cib_dom=nil)
+  unless cib_dom
+    cib_dom = get_cib_dom(session)
+    return {} unless cib_dom
   end
 
-  ret_val = {}
-  state = nil
-  user = ""
-  role = ""
-
-  stdout.each do |line|
-    if m = /^User: (.*)$/.match(line)
-      user = m[1]
-      state = "user"
-      ret_val[state] ||= {}
-      ret_val[state][user] ||= []
-      next
-    elsif m = /^Group: (.*)$/.match(line)
-      user = m[1]
-      state = "group"
-      ret_val[state] ||= {}
-      ret_val[state][user] ||= []
-      next
-    elsif m = /^Role: (.*)$/.match(line)
-      role = m[1]
-      state = "role"
-      ret_val[state] ||= {}
-      ret_val[state][role] ||= {}
-      next
-    end
+  acls = {
+    'role' => {},
+    'group' => {},
+    'user' => {},
+    'target' => {}
+  }
 
-    case state
-    when "user", "group"
-      m = /^  Roles: (.*)$/.match(line)
-      ret_val[state][user] ||= []
-      m[1].scan(/\S+/).each {|urole|
-        ret_val[state][user] << urole
+  cib_dom.elements.each('/cib/configuration/acls/*') { |e|
+    type = e.name[4..-1]
+    if e.name == 'acl_role'
+      role_id = e.attributes['id']
+      desc = e.attributes['description']
+      acls[type][role_id] = {}
+      acls[type][role_id]['description'] = desc ? desc : ''
+      acls[type][role_id]['permissions'] = []
+      e.elements.each('acl_permission') { |p|
+        p_id = p.attributes['id']
+        p_kind = p.attributes['kind']
+        val = ''
+        if p.attributes['xpath']
+          val = "xpath #{p.attributes['xpath']}"
+        elsif p.attributes['reference']
+          val = "id #{p.attributes['reference']}"
+        else
+          next
+        end
+        acls[type][role_id]['permissions'] << "#{p_kind} #{val} (#{p_id})"
+      }
+    elsif ['acl_target', 'acl_group'].include?(e.name)
+      id = e.attributes['id']
+      acls[type][id] = []
+      e.elements.each('role') { |r|
+        acls[type][id] << r.attributes['id']
       }
-    when "role"
-      ret_val[state][role] ||= {}
-      ret_val[state][role]["permissions"] ||= []
-      ret_val[state][role]["description"] ||= ""
-      if m = /^  Description: (.*)$/.match(line)
-        ret_val[state][role]["description"] = m[1]
-      elsif m = /^  Permission: (.*)$/.match(line)
-        ret_val[state][role]["permissions"] << m[1]
-      end
     end
-  end
-  return ret_val
+  }
+  acls['user'] = acls['target']
+  return acls
 end
 
-def enable_cluster()
-  stdout, stderror, retval = run_cmd(PCS, "cluster", "enable")
+def enable_cluster(session)
+  stdout, stderror, retval = run_cmd(session, PCS, "cluster", "enable")
   return false if retval != 0
   return true
 end
 
-def disable_cluster()
-  stdout, stderror, retval = run_cmd(PCS, "cluster", "disable")
+def disable_cluster(session)
+  stdout, stderror, retval = run_cmd(session, PCS, "cluster", "disable")
   return false if retval != 0
   return true
 end
@@ -603,12 +867,14 @@ end
 
 def get_corosync_version()
   begin
-    stdout, stderror, retval = run_cmd(COROSYNC, "-v")
+    stdout, stderror, retval = run_cmd(
+      PCSAuth.getSuperuserSession, COROSYNC, "-v"
+    )
   rescue
     stdout = []
   end
   if retval == 0
-    match = /(\d+)\.(\d+)\.(\d+)/.match(stdout.join())
+    match = /version\D+(\d+)\.(\d+)\.(\d+)/.match(stdout.join())
     if match
       return match[1..3].collect { | x | x.to_i }
     end
@@ -636,7 +902,9 @@ end
 
 def get_pacemaker_version()
   begin
-    stdout, stderror, retval = run_cmd(PACEMAKERD, "-$")
+    stdout, stderror, retval = run_cmd(
+      PCSAuth.getSuperuserSession, PACEMAKERD, "-$"
+    )
   rescue
     stdout = []
   end
@@ -660,7 +928,9 @@ end
 
 def get_cman_version()
   begin
-    stdout, stderror, retval = run_cmd(CMAN_TOOL, "-V")
+    stdout, stderror, retval = run_cmd(
+      PCSAuth.getSuperuserSession, CMAN_TOOL, "-V"
+    )
   rescue
     stdout = []
   end
@@ -673,6 +943,28 @@ def get_cman_version()
   return nil
 end
 
+def get_rhel_version()
+  if File.exists?('/etc/system-release')
+    release = File.open('/etc/system-release').read
+    match = /(\d+)\.(\d+)/.match(release)
+    if match
+      return match[1, 2].collect{ |x| x.to_i}
+    end
+  end
+  return nil
+end
+
+def pcsd_restart()
+  fork {
+    sleep(10)
+    if ISSYSTEMCTL
+      `systemctl restart pcsd`
+    else
+      `service pcsd restart`
+    end
+  }
+end
+
 def pcsd_enabled?()
   if ISSYSTEMCTL
     `systemctl is-enabled pcsd.service`
@@ -686,29 +978,905 @@ def get_pcsd_version()
   return PCS_VERSION.split(".").collect { | x | x.to_i }
 end
 
-def run_cmd(*args)
+def run_cmd(session, *args)
+  options = {}
+  return run_cmd_options(session, options, *args)
+end
+
+def run_cmd_options(session, options, *args)
   $logger.info("Running: " + args.join(" "))
   start = Time.now
   out = ""
   errout = ""
-  if $session[:username] == "hacluster"
-    ENV['CIB_user'] = $cookies[:CIB_user]
-  else
-    ENV['CIB_user'] = $session[:username]
-  end
-  $logger.debug("CIB USER: #{ENV['CIB_user'].to_s}")
-  status = Open4::popen4(*args) do |pid, stdin, stdout, stderr|
+
+  proc_block = proc { |pid, stdin, stdout, stderr|
+    if options and options.key?('stdin')
+      stdin.puts(options['stdin'])
+      stdin.close()
+    end
     out = stdout.readlines()
     errout = stderr.readlines()
     duration = Time.now - start
     $logger.debug(out)
+    $logger.debug(errout)
     $logger.debug("Duration: " + duration.to_s + "s")
-  end
+  }
+  cib_user = session[:username]
+  # session[:usergroups] is not defined yet when this very command is
+  # 'id -Gn' run to obtain the groups, hence the fallback to an empty list
+  cib_groups = (session[:usergroups] || []).join(' ')
+  $logger.info("CIB USER: #{cib_user}, groups: #{cib_groups}")
+  # Open4.popen4 reimplementation which sets ENV in a child process prior
+  # to running an external process by exec
+  status = Open4::do_popen(proc_block, :init) { |ps_read, ps_write|
+    ps_read.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+    ps_write.fcntl(Fcntl::F_SETFD, Fcntl::FD_CLOEXEC)
+    ENV['CIB_user'] = cib_user
+    ENV['CIB_user_groups'] = cib_groups
+    exec(*args)
+  }
+
   retval = status.exitstatus
-  $logger.debug("Return Value: " + retval.to_s)
+  $logger.info("Return Value: " + retval.to_s)
   return out, errout, retval
 end
 
 def is_score(score)
   return !!/^[+-]?((INFINITY)|(\d+))$/.match(score)
 end
+
+# Does pacemaker consider a variable as true in cib?
+# See crm_is_true in pacemaker/lib/common/utils.c
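+# e.g. is_cib_true('Yes') and is_cib_true('1') are true,
+# is_cib_true('false') and is_cib_true(nil) are false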
+def is_cib_true(var)
+  return false if not var.respond_to?(:downcase)
+  return ['true', 'on', 'yes', 'y', '1'].include?(var.downcase)
+end
+
+def read_tokens()
+  return PCSTokens.new(Cfgsync::PcsdTokens.from_file('').text()).tokens
+end
+
+def write_tokens(tokens)
+  begin
+    cfg = PCSTokens.new(Cfgsync::PcsdTokens.from_file('').text())
+    cfg.tokens = tokens
+    Cfgsync::PcsdTokens.from_text(cfg.text()).save()
+  rescue
+    return false
+  end
+  return true
+end
+
+def get_tokens_of_nodes(nodes)
+  tokens = {}
+  read_tokens.each { |node, token|
+    if nodes.include? node
+      tokens[node] = token
+    end
+  }
+  return tokens
+end
+
+def get_node_token(node)
+  tokens = read_tokens()
+  if tokens.include? node
+    return tokens[node]
+  else
+    return nil
+  end
+end
+
+def get_token_node_list()
+  return read_tokens.keys
+end
+
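+# e.g. add_prefix_to_keys({'a' => 1}, 'x_') returns {'x_a' => 1}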
+def add_prefix_to_keys(hash, prefix)
+  new_hash = {}
+  hash.each { |k,v|
+    new_hash["#{prefix}#{k}"] = v
+  }
+  return new_hash
+end
+
+def check_gui_status_of_nodes(session, nodes, check_mutuality=false, timeout=10)
+  options = {}
+  options[:check_auth_only] = '' if not check_mutuality
+  threads = []
+  not_authorized_nodes = []
+  online_nodes = []
+  offline_nodes = []
+
+  nodes = nodes.uniq.sort
+  nodes.each { |node|
+    threads << Thread.new {
+      code, response = send_request_with_token(
+        session, node, 'check_auth', false, options, true, nil, timeout
+      )
+      if code == 200
+        if check_mutuality
+          begin
+            parsed_response = JSON.parse(response)
+            if parsed_response['node_list'] and parsed_response['node_list'].uniq.sort == nodes
+              online_nodes << node
+            else
+              not_authorized_nodes << node
+            end
+          rescue
+            not_authorized_nodes << node
+          end
+        else
+          online_nodes << node
+        end
+      else
+        begin
+          parsed_response = JSON.parse(response)
+          if parsed_response['notauthorized'] or parsed_response['notoken']
+            not_authorized_nodes << node
+          else
+            offline_nodes << node
+          end
+        rescue JSON::ParserError
+        end
+      end
+    }
+  }
+  threads.each { |t| t.join }
+  return online_nodes, offline_nodes, not_authorized_nodes
+end
+
+def pcs_auth(session, nodes, username, password, force=false, local=true)
+  # if no sync is needed, do not report a sync error
+  sync_successful = true
+  sync_failed_nodes = []
+  sync_responses = {}
+  # check for already authorized nodes
+  if not force
+    online, offline, not_authenticated = check_gui_status_of_nodes(
+      session, nodes, true
+    )
+    if not_authenticated.length < 1
+      result = {}
+      online.each { |node| result[node] = {'status' => 'already_authorized'} }
+      offline.each { |node| result[node] = {'status' => 'noresponse'} }
+      return result, sync_successful, sync_failed_nodes, sync_responses
+    end
+  end
+
+  # authorize the nodes locally (i.e. not bidirectionally)
+  auth_responses = run_auth_requests(
+    session, nodes, nodes, username, password, force, true
+  )
+
+  # get the tokens and sync them within the local cluster
+  new_tokens = {}
+  auth_responses.each { |node, response|
+    new_tokens[node] = response['token'] if 'ok' == response['status']
+  }
+  if not new_tokens.empty?
+    cluster_nodes = get_corosync_nodes()
+    tokens_cfg = Cfgsync::PcsdTokens.from_file('')
+    # only tokens used in pcsd-to-pcsd communication can and need to be synced
+    # those are accessible only when running under root account
+    if Process.uid != 0
+      # other tokens just need to be stored locally for the user
+      sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
+        tokens_cfg, new_tokens, [], nil
+      )
+      return auth_responses, sync_successful, sync_failed_nodes, sync_responses
+    end
+    sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
+      tokens_cfg, new_tokens, cluster_nodes, $cluster_name
+    )
+    sync_failed_nodes = []
+    sync_not_supported_nodes = []
+    sync_responses.each { |node, response|
+      if 'not_supported' == response['status']
+        sync_not_supported_nodes << node
+      elsif response['status'] != 'ok'
+        sync_failed_nodes << node
+      else
+        node_result = response['result'][Cfgsync::PcsdTokens.name]
+        if 'not_supported' == node_result
+          sync_not_supported_nodes << node
+        elsif not ['accepted', 'rejected'].include?(node_result)
+          sync_failed_nodes << node
+        end
+      end
+    }
+    if not local
+      # authorize nodes outside of the local cluster and nodes not supporting
+      # the tokens file synchronization in the other direction
+      nodes_to_auth = []
+      nodes.each { |node|
+        nodes_to_auth << node if sync_not_supported_nodes.include?(node)
+        nodes_to_auth << node if not cluster_nodes.include?(node)
+      }
+      auth_responses2 = run_auth_requests(
+        session, nodes_to_auth, nodes, username, password, force, false
+      )
+      auth_responses.update(auth_responses2)
+    end
+  end
+
+  return auth_responses, sync_successful, sync_failed_nodes, sync_responses
+end
+
+def run_auth_requests(session, nodes_to_send, nodes_to_auth, username, password, force=false, local=true)
+  data = {}
+  nodes_to_auth.each_with_index { |node, index|
+    data["node-#{index}"] = node
+  }
+  data['username'] = username
+  data['password'] = password
+  data['bidirectional'] = 1 if not local
+  data['force'] = 1 if force
+
+  auth_responses = {}
+  threads = []
+  nodes_to_send.each { |node|
+    threads << Thread.new {
+      code, response = send_request(session, node, 'auth', true, data)
+      if 200 == code
+        token = response.strip
+        if '' == token
+          auth_responses[node] = {'status' => 'bad_password'}
+        else
+          auth_responses[node] = {'status' => 'ok', 'token' => token}
+        end
+      else
+        auth_responses[node] = {'status' => 'noresponse'}
+      end
+    }
+  }
+  threads.each { |t| t.join }
+  return auth_responses
+end
+
+def send_local_configs_to_nodes(
+  session, nodes, force=false, clear_local_permissions=false
+)
+  configs = Cfgsync::get_configs_local(true)
+  if clear_local_permissions
+    pcs_config = PCSConfig.new(configs[Cfgsync::PcsdSettings.name].text())
+    pcs_config.permissions_local = Permissions::PermissionsSet.new([])
+    configs[Cfgsync::PcsdSettings.name].text = pcs_config.text()
+  end
+  publisher = Cfgsync::ConfigPublisher.new(
+    session, configs.values(), nodes, $cluster_name
+  )
+  return publisher.send(force)
+end
+
+def send_local_certs_to_nodes(session, nodes)
+  begin
+    data = {
+      'ssl_cert' => File.read(CRT_FILE),
+      'ssl_key' => File.read(KEY_FILE),
+      'cookie_secret' => File.read(COOKIE_FILE),
+    }
+  rescue => e
+    return {
+      'status' => 'error',
+      'text' => "Unable to read certificates: #{e}",
+      'node_status' => {},
+    }
+  end
+
+  crt_errors = verify_cert_key_pair(data['ssl_cert'], data['ssl_key'])
+  if crt_errors and not crt_errors.empty?
+    return {
+      'status' => 'error',
+      'text' => "Invalid certificate and/or key: #{crt_errors.join}",
+      'node_status' => {},
+    }
+  end
+  secret_errors = verify_cookie_secret(data['cookie_secret'])
+  if secret_errors and not secret_errors.empty?
+    return {
+      'status' => 'error',
+      'text' => "Invalid cookie secret: #{secret_errors.join}",
+      'node_status' => {},
+    }
+  end
+
+  node_response = {}
+  threads = []
+  nodes.each { |node|
+    threads << Thread.new {
+      code, response = send_request_with_token(
+        session, node, '/set_certs', true, data
+      )
+      node_response[node] = [code, response]
+    }
+  }
+  threads.each { |t| t.join }
+
+  node_error = []
+  node_status = {}
+  node_response.each { |node, response|
+    if response[0] == 200
+      node_status[node] = {
+        'status' => 'ok',
+        'text' => 'Success',
+      }
+    else
+      text = response[1]
+      if response[0] == 401
+        text = "Unable to authenticate, try running 'pcs cluster auth'"
+      elsif response[0] == 400
+        begin
+          parsed_response = JSON.parse(response[1], {:symbolize_names => true})
+          if parsed_response[:noresponse]
+            text = "Unable to connect"
+          elsif parsed_response[:notoken] or parsed_response[:notauthorized]
+            text = "Unable to authenticate, try running 'pcs cluster auth'"
+          end
+        rescue JSON::ParserError
+        end
+      end
+      node_status[node] = {
+        'status' => 'error',
+        'text' => text
+      }
+      node_error << node
+    end
+  }
+  return {
+    'status' => node_error.empty?() ? 'ok' : 'error',
+    'text' => node_error.empty?() ? 'Success' : \
+      "Unable to save pcsd certificates to nodes: #{node_error.join(', ')}",
+    'node_status' => node_status,
+  }
+end
+
+def pcsd_restart_nodes(session, nodes)
+  node_response = {}
+  threads = []
+  nodes.each { |node|
+    threads << Thread.new {
+      code, response = send_request_with_token(
+        session, node, '/pcsd_restart', true
+      )
+      node_response[node] = [code, response]
+    }
+  }
+  threads.each { |t| t.join }
+
+  node_error = []
+  node_status = {}
+  node_response.each { |node, response|
+    if response[0] == 200
+      node_status[node] = {
+        'status' => 'ok',
+        'text' => 'Success',
+      }
+    else
+      text = response[1]
+      if response[0] == 401
+        text = "Unable to authenticate, try running 'pcs cluster auth'"
+      elsif response[0] == 400
+        begin
+          parsed_response = JSON.parse(response[1], {:symbolize_names => true})
+          if parsed_response[:noresponse]
+            text = "Unable to connect"
+          elsif parsed_response[:notoken] or parsed_response[:notauthorized]
+            text = "Unable to authenticate, try running 'pcs cluster auth'"
+          end
+        rescue JSON::ParserError
+        end
+      end
+      node_status[node] = {
+        'status' => 'error',
+        'text' => text
+      }
+      node_error << node
+    end
+  }
+  return {
+    'status' => node_error.empty?() ? 'ok' : 'error',
+    'text' => node_error.empty?() ? 'Success' : \
+      "Unable to restart pcsd on nodes: #{node_error.join(', ')}",
+    'node_status' => node_status,
+  }
+end
+
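+# Writes data to path with the given permissions while holding an exclusive
+# flock, so concurrent pcsd processes do not interleave their writes.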
+def write_file_lock(path, perm, data)
+  begin
+    file = nil
+    file = File.open(path, 'w', perm)
+    file.flock(File::LOCK_EX)
+    file.write(data)
+  rescue => e
+    $logger.error("Cannot save file '#{path}': #{e.message}")
+    raise
+  ensure
+    unless file.nil?
+      file.flock(File::LOCK_UN)
+      file.close()
+    end
+  end
+end
+
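+# Uses openssl to extract the modulus of the certificate and of the key;
+# the pair is only considered valid when both moduli match.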
+def verify_cert_key_pair(cert, key)
+  errors = []
+  cert_modulus = nil
+  key_modulus = nil
+
+  stdout, stderr, retval = run_cmd_options(
+    PCSAuth.getSuperuserSession(),
+    {
+      'stdin' => cert,
+    },
+    '/usr/bin/openssl', 'x509', '-modulus', '-noout'
+  )
+  if retval != 0
+    errors << "Invalid certificate: #{stderr.join}"
+  else
+    cert_modulus = stdout.join.strip
+  end
+
+  stdout, stderr, retval = run_cmd_options(
+    PCSAuth.getSuperuserSession(),
+    {
+      'stdin' => key,
+    },
+    '/usr/bin/openssl', 'rsa', '-modulus', '-noout'
+  )
+  if retval != 0
+    errors << "Invalid key: #{stderr.join}"
+  else
+    key_modulus = stdout.join.strip
+  end
+
+  if errors.empty? and cert_modulus and key_modulus
+    if cert_modulus != key_modulus
+      errors << 'Certificate does not match the key'
+    end
+  end
+
+  return errors
+end
+
+def verify_cookie_secret(secret)
+  if secret.empty?
+    return ['Cookie secret is empty']
+  end
+  return []
+end
+
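+# Queries the 'status' API of every cluster node in parallel and merges the
+# results into one overview, preferring data from a node in the quorate
+# partition, then from an online node, falling back to old (v1) status data.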
+def cluster_status_from_nodes(session, cluster_nodes, cluster_name)
+  node_map = {}
+  forbidden_nodes = {}
+  overview = {
+    :cluster_name => cluster_name,
+    :error_list => [],
+    :warning_list => [],
+    :quorate => nil,
+    :status => 'unknown',
+    :node_list => [],
+    :resource_list => [],
+  }
+
+  threads = []
+  cluster_nodes.uniq.each { |node|
+    threads << Thread.new {
+      code, response = send_request_with_token(
+        session,
+        node,
+        'status',
+        false,
+        {:version=>'2', :operations=>'1'},
+        true,
+        nil,
+        15
+      )
+      node_map[node] = {}
+      node_map[node].update(overview)
+      if 403 == code
+        forbidden_nodes[node] = true
+      end
+      node_status_unknown = {
+        :name => node,
+        :status => 'unknown',
+        :warning_list => [],
+        :error_list => []
+      }
+      begin
+        parsed_response = JSON.parse(response, {:symbolize_names => true})
+        if parsed_response[:noresponse]
+          node_map[node][:node] = {}
+          node_map[node][:node].update(node_status_unknown)
+        elsif parsed_response[:notoken] or parsed_response[:notauthorized]
+          node_map[node][:node] = {}
+          node_map[node][:node].update(node_status_unknown)
+          node_map[node][:node][:notauthorized] = true
+        else
+          if parsed_response[:node]
+            parsed_response[:status_version] = '2'
+            parsed_response[:node][:status_version] = '2'
+          else
+            parsed_response = status_v1_to_v2(parsed_response)
+          end
+          node_map[node] = parsed_response
+        end
+        node_map[node][:node][:name] = node
+      rescue JSON::ParserError
+        node_map[node][:node] = {}
+        node_map[node][:node].update(node_status_unknown)
+      end
+    }
+  }
+  threads.each { |t| t.join }
+
+  cluster_nodes_map = {}
+  node_status_list = []
+  quorate_nodes = []
+  not_authorized_nodes = []
+  old_status = false
+  node_map.each { |node_name, cluster_status|
+    # If we were able to get the node's cluster name and it differs from the
+    # requested cluster name, the node belongs to some other cluster and its
+    # data should not be used.
+    # If we don't know the node's cluster name, we keep the data because the
+    # node is possibly in our cluster and we just didn't get its status.
+    next if cluster_status[:cluster_name] != cluster_name
+    cluster_nodes_map[node_name] = cluster_status
+    node_status_list << cluster_status[:node]
+    old_status = true if '1' == cluster_status[:status_version]
+    quorate_nodes << node_name if cluster_status[:node][:quorum]
+    not_authorized_nodes << node_name if cluster_status[:node][:notauthorized]
+  }
+
+  node_status_list.each { |node|
+    return nil if forbidden_nodes[node[:name]]
+  }
+  if cluster_nodes_map.length < 1
+    return overview
+  end
+
+  # if we have quorum, use data from a node in the quorate partition
+  if quorate_nodes.length > 0
+    status = overview.update(cluster_nodes_map[quorate_nodes[0]])
+    status[:quorate] = true
+    status[:node_list] = node_status_list
+  # if we don't have quorum, use data from an online node if there is one,
+  # otherwise from any node; no node has quorum, so no node has complete
+  # info about the cluster anyway
+  elsif not old_status
+    node_to_use = cluster_nodes_map.values[0]
+    cluster_nodes_map.each { |_, node_data|
+      if node_data[:node] and node_data[:node][:status] == 'online'
+        node_to_use = node_data
+        break
+      end
+    }
+    status = overview.update(node_to_use)
+    status[:quorate] = false
+    status[:node_list] = node_status_list
+  # old pcsd doesn't provide info about quorum, use data from any node
+  else
+    status = overview
+    status[:quorate] = nil
+    status[:node_list] = node_status_list
+    cluster_nodes_map.each { |_, node|
+      if node[:status_version] and node[:status_version] == '1' and
+          !node[:cluster_settings][:error]
+        status = overview.update(node)
+        break
+      end
+    }
+  end
+  status.delete(:node)
+
+  if status[:quorate]
+    fence_count = 0
+    status[:resource_list].each { |r|
+      if r[:stonith]
+        fence_count += 1
+      end
+    }
+    if fence_count == 0
+      status[:warning_list] << {
+        :message => 'No fence devices configured in the cluster',
+      }
+    end
+
+    if status[:cluster_settings]['stonith-enabled'.to_sym] and
+        not is_cib_true(status[:cluster_settings]['stonith-enabled'.to_sym])
+      status[:warning_list] << {
+        :message => 'Stonith is not enabled',
+      }
+    end
+  end
+
+  if not_authorized_nodes.length > 0
+    status[:warning_list] << {
+      :message => 'Not authorized against node(s) '\
+        + not_authorized_nodes.join(', '),
+      :type => 'nodes_not_authorized',
+      :node_list => not_authorized_nodes,
+    }
+  end
+
+  if status[:quorate].nil?
+    if old_status
+      status[:warning_list] << {
+        :message => 'Cluster is running an old version of pcs/pcsd which '\
+          + "doesn't provide data for the dashboard.",
+        :type => 'old_pcsd'
+      }
+    else
+      status[:error_list] << {
+        :message => 'Unable to connect to the cluster.'
+      }
+    end
+    status[:status] = 'unknown'
+    return status
+  end
+
+  if status[:error_list].length > 0 or (not status[:quorate].nil? and not status[:quorate])
+    status[:status] = 'error'
+  else
+    if status[:warning_list].length > 0
+      status[:status] = 'warning'
+    end
+    status[:node_list].each { |node|
+      if (node[:error_list] and node[:error_list].length > 0) or
+          ['unknown', 'offline'].include?(node[:status])
+        status[:status] = 'error'
+        break
+      elsif node[:warning_list] and node[:warning_list].length > 0
+        status[:status] = 'warning'
+      end
+    }
+    if status[:status] != 'error'
+      status[:resource_list].each { |resource|
+        if ['failed', 'blocked'].include?(resource[:status])
+          status[:status] = 'error'
+          break
+        elsif ['partially running'].include?(resource[:status])
+          status[:status] = 'warning'
+        end
+      }
+    end
+  end
+  status[:status] = 'ok' if status[:status] == 'unknown'
+  return status
+end
+
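+# Formats /proc/uptime as a human readable string,
+# e.g. 90061 seconds gives "1 day, 01:01:01"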
+def get_node_uptime()
+  uptime = `cat /proc/uptime`.chomp.split(' ')[0].split('.')[0].to_i
+  mm, ss = uptime.divmod(60)
+  hh, mm = mm.divmod(60)
+  dd, hh = hh.divmod(24)
+  return '%d day%s, %02d:%02d:%02d' % [dd, dd != 1 ? 's' : '', hh, mm, ss]
+end
+
+def get_node_status(session, cib_dom)
+  node_status = {
+      :cluster_name => $cluster_name,
+      :groups => [],
+      :constraints => {
+          # :rsc_location => [],
+          # :rcs_colocation => [],
+          # :rcs_order => []
+      },
+      :cluster_settings => {},
+      :need_ring1_address => need_ring1_address?,
+      :is_cman_with_udpu_transport => is_cman_with_udpu_transport?,
+      :acls => get_acls(session, cib_dom),
+      :username => session[:username],
+      :fence_levels => get_fence_levels(session, cib_dom),
+      :node_attr => node_attrs_to_v2(get_node_attributes(session, cib_dom)),
+      :nodes_utilization => get_nodes_utilization(cib_dom),
+      :known_nodes => []
+  }
+
+  nodes = get_nodes_status()
+
+  known_nodes = []
+  nodes.each { |_, node_list|
+    known_nodes.concat node_list
+  }
+  node_status[:known_nodes] = known_nodes.uniq
+
+  nodes.each do |k,v|
+    node_status[k.to_sym] = v
+  end
+
+  if cib_dom
+    node_status[:groups] = get_resource_groups(cib_dom)
+    node_status[:constraints] = getAllConstraints(cib_dom.elements['/cib/configuration/constraints'])
+  end
+
+  cluster_settings = getAllSettings(session, cib_dom)
+  if not cluster_settings.has_key?('error')
+    node_status[:cluster_settings] = cluster_settings
+  end
+
+  return node_status
+end
+
+def get_resource_groups(cib_dom)
+  unless cib_dom
+    return []
+  end
+  group_list = []
+  cib_dom.elements.each('/cib/configuration/resources//group') do |e|
+    group_list << e.attributes['id']
+  end
+  return group_list
+end
+
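+# Builds ClusterEntity objects (primitives, groups, clones, master/slave
+# resources) from the CIB, optionally enriched with crm_mon status and
+# operation history.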
+def get_resources(cib_dom, crm_dom=nil, get_operations=false)
+  unless cib_dom
+    return []
+  end
+
+  resource_list = []
+  operations = (get_operations) ? ClusterEntity::get_resources_operations(cib_dom) : nil
+  rsc_status = ClusterEntity::get_rsc_status(crm_dom)
+
+  cib_dom.elements.each('/cib/configuration/resources/primitive') do |e|
+    resource_list << ClusterEntity::Primitive.new(e, rsc_status, nil, operations)
+  end
+  cib_dom.elements.each('/cib/configuration/resources/group') do |e|
+    resource_list << ClusterEntity::Group.new(e, rsc_status, nil, operations)
+  end
+  cib_dom.elements.each('/cib/configuration/resources/clone') do |e|
+    resource_list << ClusterEntity::Clone.new(
+      e, crm_dom, rsc_status, nil, operations
+    )
+  end
+  cib_dom.elements.each('/cib/configuration/resources/master') do |e|
+    resource_list << ClusterEntity::MasterSlave.new(
+      e, crm_dom, rsc_status, nil, operations
+    )
+  end
+  return resource_list
+end
+
+def get_resource_by_id(id, cib_dom, crm_dom=nil, rsc_status=nil, operations=false)
+  unless cib_dom
+    return nil
+  end
+
+  e = cib_dom.elements["/cib/configuration/resources//*[@id='#{id}']"]
+  unless e
+    return nil
+  end
+
+  if e.parent.name != 'resources' # if resource is in group, clone or master/slave
+    p = get_resource_by_id(
+      e.parent.attributes['id'], cib_dom, crm_dom, rsc_status, operations
+    )
+    return p.get_map[id.to_sym]
+  end
+
+  case e.name
+    when 'primitive'
+      return ClusterEntity::Primitive.new(e, rsc_status, nil, operations)
+    when 'group'
+      return ClusterEntity::Group.new(e, rsc_status, nil, operations)
+    when 'clone'
+      return ClusterEntity::Clone.new(e, crm_dom, rsc_status, nil, operations)
+    when 'master'
+      return ClusterEntity::MasterSlave.new(e, crm_dom, rsc_status, nil, operations)
+    else
+      return nil
+  end
+end
+
+def get_crm_mon_dom(session)
+  begin
+    stdout, _, retval = run_cmd(
+      session, CRM_MON, '--one-shot', '-r', '--as-xml'
+    )
+    if retval == 0
+      return REXML::Document.new(stdout.join("\n"))
+    end
+  rescue
+    $logger.error 'Failed to parse crm_mon.'
+  end
+  return nil
+end
+
+def get_cib_dom(session)
+  begin
+    stdout, _, retval = run_cmd(session, 'cibadmin', '-Q', '-l')
+    if retval == 0
+      return REXML::Document.new(stdout.join("\n"))
+    end
+  rescue
+    $logger.error 'Failed to parse cib.'
+  end
+  return nil
+end
+
+def node_attrs_to_v2(node_attrs)
+  all_nodes_attr = {}
+  node_attrs.each { |node, attrs|
+    all_nodes_attr[node] = []
+    attrs.each { |attr|
+      all_nodes_attr[node] << {
+        :id => attr[:id],
+        :name => attr[:key],
+        :value => attr[:value]
+      }
+    }
+  }
+  return all_nodes_attr
+end
+
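+# Converts a status response produced by an older pcsd (API v1) into the v2
+# structure used by the dashboard: keeps the cluster-wide keys, rebuilds the
+# resource tree and moves node-specific keys under :node.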
+def status_v1_to_v2(status)
+  new_status = status.select { |k,_|
+    [:cluster_name, :username, :is_cman_with_udpu_transport,
+     :need_ring1_address, :cluster_settings, :constraints, :groups,
+     :corosync_online, :corosync_offline, :pacemaker_online, :pacemaker_standby,
+     :pacemaker_offline, :acls, :fence_levels
+    ].include?(k)
+  }
+  new_status[:node_attr] = node_attrs_to_v2(status[:node_attr])
+
+  resources = ClusterEntity::make_resources_tree(
+    ClusterEntity::get_primitives_from_status_v1(status[:resources])
+  )
+  resources_hash = []
+  resources.each { |r|
+    resources_hash << r.to_status('2')
+  }
+  new_status[:resource_list] = resources_hash
+  new_status[:node] = status.select { |k,_|
+    [:uptime, :corosync, :pacemaker, :cman, :corosync_enabled,
+     :pacemaker_enabled, :pcsd_enabled
+    ].include?(k)
+  }
+
+  new_status[:node].update(
+    {
+      :id => status[:node_id],
+      :quorum => nil,
+      :warning_list => [],
+      :error_list => [],
+      :status => (new_status[:node][:corosync] and
+        new_status[:node][:pacemaker]) ? "online" : "offline",
+      :status_version => '1'
+    }
+  )
+  new_status[:status_version] = '1'
+
+  return new_status
+end
+
+def allowed_for_local_cluster(session, action)
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  return pcs_config.permissions_local.allows?(
+    session[:username], session[:usergroups], action
+  )
+end
+
+def allowed_for_superuser(session)
+  $logger.debug(
+    "permission check superuser username=#{session[:username]} groups=#{session[:groups]}"
+  )
+  if SUPERUSER != session[:username]
+    $logger.debug('permission denied')
+    return false
+  end
+  $logger.debug('permission granted for superuser')
+  return true
+end
+
+def get_default_overview_node_list(clustername)
+  nodes = get_cluster_nodes clustername
+  node_list = []
+  nodes.each { |node|
+    node_list << {
+      'error_list' => [],
+      'warning_list' => [],
+      'status' => 'unknown',
+      'quorum' => false,
+      'name' => node
+    }
+  }
+  return node_list
+end
diff --git a/pcsd/pcsd b/pcsd/pcsd
index b47472a..1257f9c 100755
--- a/pcsd/pcsd
+++ b/pcsd/pcsd
@@ -13,40 +13,57 @@
 # Should-Stop: 
 # Default-Start:
 # Default-Stop:
-# Short-Description: Starts and stops Pacemaker & Corosync daemon
-# Description: Starts and stops Pacemaker & Corosync daemon
+# Short-Description: Starts and stops Pacemaker & Corosync Configuration daemon
+# Description: Starts and stops Pacemaker & Corosync Configuration daemon
 ### END INIT INFO
 
 # Source function library.
 . /etc/rc.d/init.d/functions
 
 exec="/usr/bin/ruby"
+params=" -C/var/lib/pcsd -I/usr/lib/pcsd -- /usr/lib/pcsd/ssl.rb"
 prog="pcsd"
 config="/var/lib/pcsd"
+pidfile="/var/run/pcsd.pid"
 
 [ -e /etc/sysconfig/$prog ] && . /etc/sysconfig/$prog
 
 lockfile=/var/lock/subsys/$prog
 
 start() {
-    [ -x $exec ] || exit 5
+    [ -x $exec ] || return 5
+    retval=0
     echo -n $"Starting $prog: "
-    export GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby
-    cd /var/lib/pcsd
-    daemon $exec -I/usr/lib/pcsd /usr/lib/pcsd/ssl.rb
-    # if not running, start it up here, usually something like "daemon $exec"
-    retval=$?
+    if status $prog > /dev/null 2>&1; then
+        success
+    else
+        export GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby
+        $exec $params > /dev/null 2>&1 &
+        echo $! > $pidfile
+        if status $prog > /dev/null 2>&1; then
+            touch $lockfile
+            success
+        else
+            failure
+            retval=1
+        fi
+    fi
     echo
-    [ $retval -eq 0 ] && touch $lockfile
     return $retval
 }
 
 stop() {
     echo -n $"Stopping $prog: "
-    # stop it here, often "killproc $prog"
+    ! status $prog > /dev/null 2>&1 && return
+    killproc $prog
     retval=$?
+    if [ $retval -eq 0 ]; then
+        rm -f $lockfile
+        success
+    else
+        failure
+    fi
     echo
-    [ $retval -eq 0 ] && rm -f $lockfile
     return $retval
 }
 
@@ -104,4 +121,3 @@ case "$1" in
         exit 2
 esac
 exit $?
-
diff --git a/pcsd/pcsd-cli.rb b/pcsd/pcsd-cli.rb
new file mode 100755
index 0000000..630ff87
--- /dev/null
+++ b/pcsd/pcsd-cli.rb
@@ -0,0 +1,138 @@
+#!/usr/bin/ruby
+
+require 'rubygems'
+require 'etc'
+require 'json'
+require 'stringio'
+require 'orderedhash'
+
+require 'bootstrap.rb'
+require 'pcs.rb'
+require 'auth.rb'
+
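+# Builds the JSON response printed by pcsd-cli, e.g. (illustrative)
+# cli_format_response('ok', nil, {'foo' => 'bar'}) produces
+#   {"status": "ok", "data": {"foo": "bar"}, "log": [...]}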
+def cli_format_response(status, text=nil, data=nil)
+  response = OrderedHash.new
+  response['status'] = status
+  response['text'] = text if text
+  response['data'] = data if data
+  response['log'] = $logger_device.string.lines.to_a
+  return JSON.pretty_generate(response)
+end
+
+def cli_exit(status, text=nil, data=nil, exitcode=0)
+  puts cli_format_response(status, text, data)
+  exit exitcode
+end
+
+
+# bootstrap, emulate environment created by pcsd http server
+session = {}
+PCS = get_pcs_path(File.expand_path(File.dirname(__FILE__)))
+$logger_device = StringIO.new
+$logger = configure_logger($logger_device)
+
+# check and set user
+uid = Process.uid
+if 0 == uid
+  if ENV['CIB_user'] and ENV['CIB_user'].strip != ''
+    session[:username] = ENV['CIB_user']
+    if ENV['CIB_user_groups'] and ENV['CIB_user_groups'].strip != ''
+      session[:usergroups] = ENV['CIB_user_groups'].split(nil)
+    else
+      session[:usergroups] = []
+    end
+  else
+    session[:username] = SUPERUSER
+    session[:usergroups] = []
+  end
+else
+  username = Etc.getpwuid(uid).name
+  if not PCSAuth.isUserAllowedToLogin(username)
+    cli_exit('access_denied')
+  else
+    session[:username] = username
+    success, groups = PCSAuth.getUsersGroups(username)
+    session[:usergroups] = success ? groups : []
+  end
+end
+
+# continue environment setup with user set in session
+$cluster_name = get_cluster_name()
+
+# get params and run a command
+command = ARGV[0]
+allowed_commands = {
+  'read_tokens' => {
+    # returns tokens of the user who runs pcsd-cli, thus no permission check
+    'only_superuser' => false,
+    'permissions' => nil,
+    'call' => lambda { |params, session| read_tokens() },
+  },
+  'auth' => {
+    'only_superuser' => false,
+    'permissions' => nil,
+    'call' => lambda { |params, session|
+      auth_responses, sync_successful, sync_nodes_err, sync_responses = pcs_auth(
+        session, params['nodes'] || [], params['username'] || '',
+        params['password'] || '', params['force'], params['local']
+      )
+      return {
+        'auth_responses' => auth_responses,
+        'sync_successful' => sync_successful,
+        'sync_nodes_err' => sync_nodes_err,
+        'sync_responses' => sync_responses,
+      }
+    },
+  },
+  'send_local_configs' => {
+    'only_superuser' => false,
+    'permissions' => Permissions::FULL,
+    'call' => lambda { |params, session|
+      send_local_configs_to_nodes(
+        # for a case when sending to a node which is being added to a cluster
+        # - the node doesn't have the config so it cannot check permissions
+        PCSAuth.getSuperuserSession(),
+        params['nodes'] || [],
+        params['force'] || false,
+        params['clear_local_cluster_permissions'] || false
+      )
+    }
+  },
+  'send_local_certs' => {
+    'only_superuser' => false,
+    'permissions' => Permissions::FULL,
+    'call' => lambda { |params, session|
+      send_local_certs_to_nodes(session, params['nodes'] || [])
+    }
+  },
+  'pcsd_restart_nodes' => {
+    'only_superuser' => false,
+    'permissions' => nil,
+    'call' => lambda { |params, session|
+      pcsd_restart_nodes(session, params['nodes'] || [])
+    }
+  },
+}
+
+if allowed_commands.key?(command)
+  begin
+    params = JSON.parse(STDIN.read)
+  rescue JSON::ParserError => e
+    cli_exit('bad_json_input', e.to_s)
+  end
+  if allowed_commands[command]['only_superuser']
+    if not allowed_for_superuser(session)
+      cli_exit('permission_denied')
+    end
+  end
+  if allowed_commands[command]['permissions']
+    if not allowed_for_local_cluster(session, allowed_commands[command]['permissions'])
+      cli_exit('permission_denied')
+    end
+  end
+  result = allowed_commands[command]['call'].call(params, session)
+  cli_exit('ok', nil, result)
+else
+  cli_exit('bad_command')
+end
+
diff --git a/pcsd/pcsd.conf b/pcsd/pcsd.conf
index 69c4022..f596d59 100644
--- a/pcsd/pcsd.conf
+++ b/pcsd/pcsd.conf
@@ -1,3 +1,4 @@
 # pcsd configuration file
 # Set PCSD_DEBUG to true for advanced pcsd debugging information
 PCSD_DEBUG=false
+RACK_ENV=production
diff --git a/pcsd/pcsd.debian b/pcsd/pcsd.debian
new file mode 100755
index 0000000..2ed3169
--- /dev/null
+++ b/pcsd/pcsd.debian
@@ -0,0 +1,141 @@
+#!/bin/sh
+##
+# pcsd Pacemaker & Corosync configuration daemon
+#
+# chkconfig:   - 21 81 
+# description: Pacemaker & Corosync configuration daemon
+
+### BEGIN INIT INFO
+# Provides: pcsd
+# Required-Start: $remote_fs $network $syslog
+# Required-Stop: $remote_fs $network $syslog
+# Should-Start: 
+# Should-Stop: 
+# Default-Start: 2 3 4 5
+# Default-Stop: 0 1 6
+# Short-Description: Starts and stops Pacemaker & Corosync daemon
+# Description: Starts and stops Pacemaker & Corosync daemon
+### END INIT INFO
+
+# PATH
+PATH=/usr/sbin:/usr/bin:/sbin:/bin
+DESC="pcs daemon"
+NAME=pcsd
+EXEC=ruby
+SUB_EXEC=/usr/share/pcsd/ssl.rb
+DAEMON_USER=root
+DAEMON=/usr/bin/ruby
+DAEMON_ARGS="-C/var/lib/pcsd -I/usr/share/pcsd -- /usr/share/pcsd/ssl.rb"
+PIDFILE=/var/run/$NAME.pid
+SCRIPTNAME=/etc/init.d/$NAME
+LOGFILE=/var/log/$NAME/$NAME.log
+SLEEP_DURATION=2
+
+# Warn if ruby or pcsd is not installed
+[ -x "$(which $EXEC)" ] || echo "$EXEC was not found. Is it installed?"
+[ -f "$SUB_EXEC" ] || echo "$SUB_EXEC not found. Is pcs installed?"
+
+# Read configuration variable file if it is present
+[ -r /etc/default/$NAME ] && . /etc/default/$NAME
+
+# Source lsb init functions
+. /lib/lsb/init-functions
+
+is_running()
+{
+  # Test whether pid file exists or not
+  test -f $PIDFILE || return 1
+
+  # Test whether process is running or not
+  read PID < "$PIDFILE"
+  ps -p $PID >/dev/null 2>&1 || return 1
+
+  # Is running
+  return 0
+}
+
+root_only()
+{
+  if [ "$(id -u)" != "0" ]; then
+    echo "Only root should run this operation"
+    exit 1
+  fi
+}
+
+run()
+{
+  if is_running; then
+    PID="$(cat $PIDFILE)"
+    echo "Daemon is already running as PID $PID"
+    return 1
+  fi
+
+  nohup $DAEMON $DAEMON_ARGS > /dev/null 2>&1 &
+  echo $! > $PIDFILE
+  read PID < "$PIDFILE"
+
+  echo "PID is $PID"
+
+  sleep $SLEEP_DURATION
+  if ! is_running; then
+    echo "Daemon died immediately after starting. Please check your logs and configurations."
+    return 1
+  fi
+
+  echo "Daemon is running as PID $PID"
+  return 0
+}
+
+stop()
+{
+  if is_running; then
+    read PID < "$PIDFILE"
+    kill -9 $PID
+  fi
+
+  sleep $SLEEP_DURATION
+  if is_running; then
+    while is_running; do
+      echo "waiting for daemon to die (PID $PID)"
+      sleep $SLEEP_DURATION
+    done
+  fi
+
+  # Be sure to remove the pid file
+  rm -f "$PIDFILE"
+  return 0
+}
+
+case "$1" in
+  start)
+    root_only
+    log_daemon_msg "Starting $DESC"  "$NAME"
+    run
+    log_end_msg $?
+    ;;
+  stop)
+    root_only
+    log_daemon_msg "Stopping $DESC" "$NAME"
+    stop
+    log_end_msg $?
+    ;;
+  restart|force-reload)
+    log_daemon_msg "Restarting $DESC" "$NAME"
+    root_only
+    $0 stop && $0 start
+    ;;
+  status|monitor)
+    status_of_proc \
+      -p "$PIDFILE" \
+      "$SUB_EXEC" \
+      "$NAME" \
+      && exit 0 \
+      || exit $?
+    ;;
+  *)
+    echo "Usage: $0 {start|stop|restart|reload|force-reload|status|monitor}"
+    exit 1
+  ;;
+esac
+
+:
diff --git a/pcsd/pcsd.pam.debian b/pcsd/pcsd.pam.debian
new file mode 100644
index 0000000..4757464
--- /dev/null
+++ b/pcsd/pcsd.pam.debian
@@ -0,0 +1,5 @@
+#%PAM-1.0
+auth       include      common-auth
+account    include      common-account
+password   include      common-password
+session    include      common-session
diff --git a/pcsd/pcsd.rb b/pcsd/pcsd.rb
index 8db8f25..059d3a2 100644
--- a/pcsd/pcsd.rb
+++ b/pcsd/pcsd.rb
@@ -1,7 +1,14 @@
 require 'sinatra'
-require 'sinatra/reloader' if development?  #require 'rack/ssl'
+require 'sinatra/reloader' if development?
 require 'sinatra/cookies'
 require 'rexml/document'
+require 'webrick'
+require 'webrick/https'
+require 'openssl'
+require 'logger'
+require 'thread'
+
+require 'bootstrap.rb'
 require 'resource.rb'
 require 'remote.rb'
 require 'fenceagent.rb'
@@ -10,50 +17,58 @@ require 'config.rb'
 require 'pcs.rb'
 require 'auth.rb'
 require 'wizard.rb'
-require 'webrick'
-require 'pp'
-require 'webrick/https'
-require 'openssl'
-require 'logger'
+require 'cfgsync.rb'
+require 'permissions.rb'
 
 Dir["wizards/*.rb"].each {|file| require file}
 
 use Rack::CommonLogger
 
-COOKIE_FILE = "/var/lib/pcsd/pcsd.cookiesecret"
+set :app_file, __FILE__
+
+def generate_cookie_secret
+  return SecureRandom.hex(30)
+end
 
 begin
   secret = File.read(COOKIE_FILE)
-rescue Errno::ENOENT => e
-  secret = SecureRandom.hex(30)
+  secret_errors = verify_cookie_secret(secret)
+  if secret_errors and not secret_errors.empty?
+    secret_errors.each { |err| $logger.error err }
+    $logger.error "Invalid cookie secret, using temporary one"
+    secret = generate_cookie_secret()
+  end
+rescue Errno::ENOENT
+  secret = generate_cookie_secret()
   File.open(COOKIE_FILE, 'w', 0700) {|f| f.write(secret)}
 end
 
 use Rack::Session::Cookie,
   :expire_after => 60 * 60,
-  :secret => secret
+  :secret => secret,
+  :secure => true, # only send over HTTPS
+  :httponly => true # don't provide to javascript
 
 #use Rack::SSL
 
-Dir["wizards/*.rb"].each {|file| also_reload file}
-also_reload 'resource.rb'
-also_reload 'remote.rb'
-also_reload 'fenceagent.rb'
-also_reload 'cluster.rb'
-also_reload 'config.rb'
-also_reload 'pcs.rb'
-also_reload 'auth.rb'
-also_reload 'wizard.rb'
-
-enable :sessions
+if development?
+  Dir["wizards/*.rb"].each {|file| also_reload file}
+  also_reload 'resource.rb'
+  also_reload 'remote.rb'
+  also_reload 'fenceagent.rb'
+  also_reload 'cluster.rb'
+  also_reload 'config.rb'
+  also_reload 'pcs.rb'
+  also_reload 'auth.rb'
+  also_reload 'wizard.rb'
+  also_reload 'cfgsync.rb'
+end
 
 before do
-  $session = session
-  $cookies = cookies
   if request.path != '/login' and not request.path == "/logout" and not request.path == '/remote/auth'
     protected! 
   end
-  $cluster_name = get_cluster_version()
+  $cluster_name = get_cluster_name()
   @errorval = session[:errorval]
   @error = session[:error]
   session[:errorval] = nil
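
The hunk above makes the session cookie secret both persistent and validated: the secret is read from COOKIE_FILE, passed to verify_cookie_secret (not shown in this diff; it is expected to return a list of error messages), and replaced with a freshly generated temporary secret if the check fails or the file is missing. A minimal, hypothetical sketch of a check with that calling convention, just to show the expected shape:

    # hypothetical; the real verify_cookie_secret is defined outside this hunk
    def verify_cookie_secret(secret)
      errors = []
      errors << 'Cookie secret is empty' if secret.to_s.strip.empty?
      # further checks (length, allowed characters, ...) would go here
      return errors
    end

The cookie itself is additionally marked :secure (sent over HTTPS only) and :httponly (not readable from javascript).
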
@@ -61,67 +76,52 @@ before do
 end
 
 configure do
-  PCS_VERSION = "0.9.139"
-  ISRHEL6 = is_rhel6
-  ISSYSTEMCTL = is_systemctl
   DISABLE_GUI = false
-
-  OCF_ROOT = "/usr/lib/ocf"
-  HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/"
-  PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/"
-  PENGINE = "/usr/libexec/pacemaker/pengine"
-  CRM_NODE = "/usr/sbin/crm_node"
-  if Dir.pwd == "/var/lib/pcsd"
-    PCS = "/usr/sbin/pcs" 
-  else
-    PCS = "../pcs/pcs" 
-  end
-  CRM_ATTRIBUTE = "/usr/sbin/crm_attribute"
-  COROSYNC = "/usr/sbin/corosync"
-  if ISRHEL6
-    COROSYNC_CMAPCTL = "/usr/sbin/corosync-objctl"
-  else
-    COROSYNC_CMAPCTL = "/usr/sbin/corosync-cmapctl"
-  end
-  COROSYNC_QUORUMTOOL = "/usr/sbin/corosync-quorumtool"
-  CMAN_TOOL = "/usr/sbin/cman_tool"
-  PACEMAKERD = "/usr/sbin/pacemakerd"
-  COROSYNC_CONF = "/etc/corosync/corosync.conf"
-  CLUSTER_CONF = "/etc/cluster/cluster.conf"
-  CIBADMIN = "/usr/sbin/cibadmin"
-  SETTINGS_FILE = "pcs_settings.conf"
-  $user_pass_file = "pcs_users.conf"
-
+  PCS = get_pcs_path(File.expand_path(File.dirname(__FILE__)))
   logger = File.open("/var/log/pcsd/pcsd.log", "a+", 0600)
   STDOUT.reopen(logger)
   STDERR.reopen(logger)
   STDOUT.sync = true
   STDERR.sync = true
-  $logger = Logger.new('/var/log/pcsd/pcsd.log')
-  if ENV['PCSD_DEBUG'] and ENV['PCSD_DEBUG'].downcase == "true" then
-    $logger.level = Logger::DEBUG
-    $logger.info "PCSD Debugging enabled"
-  else
-    $logger.level = Logger::INFO
-  end
-
-  if ISRHEL6
-    $logger.debug "Detected RHEL 6"
-  else
-    $logger.debug "Did not detect RHEL 6"
-  end
-
-  if not defined? $cur_node_name
-    $cur_node_name = `hostname`.chomp
-  end
+  $logger = configure_logger('/var/log/pcsd/pcsd.log')
+  $semaphore_cfgsync = Mutex.new
 end
 
 set :logging, true
 set :run, false
 
+$thread_cfgsync = Thread.new {
+  while true
+    $semaphore_cfgsync.synchronize {
+      $logger.debug('Config files sync thread started')
+      if Cfgsync::ConfigSyncControl.sync_thread_allowed?()
+        begin
+          # do not sync if this host is not in a cluster
+          cluster_name = get_cluster_name()
+          if cluster_name and !cluster_name.empty?()
+            $logger.debug('Config files sync thread fetching')
+            fetcher = Cfgsync::ConfigFetcher.new(
+              PCSAuth.getSuperuserSession(), Cfgsync::get_cfg_classes(),
+              get_corosync_nodes(), cluster_name
+            )
+            cfgs_to_save, _ = fetcher.fetch()
+            cfgs_to_save.each { |cfg_to_save|
+              cfg_to_save.save()
+            }
+          end
+        rescue => e
+          $logger.warn("Config files sync thread exception: #{e}")
+        end
+      end
+      $logger.debug('Config files sync thread finished')
+    }
+    sleep(Cfgsync::ConfigSyncControl.sync_thread_interval())
+  end
+}
+
 helpers do
   def protected!
-    if not PCSAuth.isLoggedIn(session, request.cookies)
+    if not PCSAuth.loginByToken(session, cookies) and not PCSAuth.isLoggedIn(session)
       # If we're on /managec/<cluster_name>/main we redirect
       match_expr = "/managec/(.*)/(.*)"
       mymatch = request.path.match(match_expr)
@@ -130,55 +130,31 @@ helpers do
         on_managec_main = true
       end
 
-      if request.path.start_with?('/remote') or (request.path.match(match_expr) and not on_managec_main)
-	$logger.info "ERROR: Request without authentication"
-	halt [401, '{"notauthorized":"true"}']
+      if request.path.start_with?('/remote') or
+        (request.path.match(match_expr) and not on_managec_main) or
+        '/run_pcs' == request.path or
+        '/clusters_overview' == request.path or
+        request.path.start_with?('/permissions_')
+      then
+        $logger.info "ERROR: Request without authentication"
+        halt [401, '{"notauthorized":"true"}']
       else
-	session[:pre_login_path] = request.path
-	redirect '/login'
+        session[:pre_login_path] = request.path
+        redirect '/login'
       end
     end
   end
 
-  def setup
-    @nodes_online, @nodes_offline = get_nodes()
-    @nodes = {}
-    @nodes_online.each do |i|
-      @nodes[i]  = Node.new(i, i, i, true)
-    end
-    @nodes_offline.each do |i|
-      @nodes[i]  = Node.new(i, i, i, false)
-    end
-
-    if @nodes_online.length == 0
-      @pcs_node_offline = true
-    end
-
-    if params[:node]
-      @cur_node = @nodes[params[:node]]
-      if not @cur_node
-	@cur_node = @nodes.values[0]
-      end
-    else
-      @cur_node = @nodes.values[0]
-    end
-
-    if @nodes.length != 0
-      @loc_dep_allow, @loc_dep_disallow = getLocationDeps(@cur_node)
-    end
-    @nodes = @nodes_online.concat(@nodes_offline)
-  end
-
-  def getParamLine(params)
-    param_line = ""
-    meta_options = ""
+  def getParamList(params)
+    param_line = []
+    meta_options = []
     params.each { |param, val|
       if param.start_with?("_res_paramne_") or (param.start_with?("_res_paramempty_") and val != "")
-	myparam = param.sub(/^_res_paramne_/,"").sub(/^_res_paramempty_/,"")
-	param_line += " #{myparam}=#{val}"
+        myparam = param.sub(/^_res_paramne_/,"").sub(/^_res_paramempty_/,"")
+        param_line << "#{myparam}=#{val}"
       end
       if param == "disabled"
-      	meta_options += " meta target-role=Stopped"
+        meta_options << 'meta' << 'target-role=Stopped'
       end
     }
     return param_line + meta_options
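
getParamLine used to build a single space-separated string of resource parameters; getParamList (above) returns an array instead, so the values are later handed to pcs as separate arguments and never pass through a shell. Based only on the code above, a call would behave like this (hypothetical form values):

    getParamList({
      '_res_paramne_ip'     => '192.168.100.50',
      '_res_paramempty_nic' => '',       # empty value -> skipped
      'disabled'            => 'true'
    })
    # => ["ip=192.168.100.50", "meta", "target-role=Stopped"]
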
@@ -186,11 +162,169 @@ helpers do
 end
 
 get '/remote/?:command?' do
-  return remote(params,request)
+  return remote(params, request, session)
 end
 
 post '/remote/?:command?' do
-  return remote(params,request)
+  return remote(params, request, session)
+end
+
+post '/run_pcs' do
+  command = params['command'] || '{}'
+  std_in = params['stdin'] || nil
+  begin
+    command_decoded = JSON.parse(command)
+  rescue JSON::ParserError
+    result = {
+      'status' => 'error',
+      'data' => {},
+    }
+    return JSON.pretty_generate(result)
+  end
+  # do not reveal potentially sensitive information
+  command_decoded.delete('--debug')
+
+  allowed_commands = {
+    ['cluster', 'auth', '...'] => {
+      'only_superuser' => false,
+      'permissions' => nil,
+    },
+    # runs on the local node, check permissions
+    ['cluster', 'corosync'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::READ,
+    },
+    # runs on a remote node which checks permissions by itself
+    ['cluster', 'corosync', '...'] => {
+      'only_superuser' => false,
+      'permissions' => nil,
+    },
+    ['cluster', 'destroy', '...'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::FULL,
+    },
+    # runs on the local node, check permissions
+    ['cluster', 'disable'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::WRITE,
+    },
+    # runs on a remote node which checks permissions by itself
+    ['cluster', 'disable', '...'] => {
+      'only_superuser' => false,
+      'permissions' => nil,
+    },
+    # runs on the local node, check permissions
+    ['cluster', 'enable'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::WRITE,
+    },
+    # runs on a remote node which checks permissions by itself
+    ['cluster', 'enable', '...'] => {
+      'only_superuser' => false,
+      'permissions' => nil,
+    },
+    ['cluster', 'node', '...'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::FULL,
+    },
+    ['cluster', 'pcsd-status', '...'] => {
+      'only_superuser' => false,
+      'permissions' => nil,
+    },
+    ['cluster', 'setup', '...'] => {
+      'only_superuser' => true,
+      'permissions' => nil,
+    },
+    # runs on the local node, check permissions
+    ['cluster', 'start'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::WRITE,
+    },
+    # runs on a remote node which checks permissions by itself
+    ['cluster', 'start', '...'] => {
+      'only_superuser' => false,
+      'permissions' => nil,
+    },
+    # runs on the local node, check permissions
+    ['cluster', 'stop'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::WRITE,
+    },
+    # runs on a remote node which checks permissions by itself
+    ['cluster', 'stop', '...'] => {
+      'only_superuser' => false,
+      'permissions' => nil,
+    },
+    ['cluster', 'sync', '...'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::FULL,
+    },
+    ['config', 'restore', '...'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::FULL,
+    },
+    ['pcsd', 'sync-certificates', '...'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::FULL,
+    },
+    ['status', 'nodes', 'corosync-id', '...'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::READ,
+    },
+    ['status', 'nodes', 'pacemaker-id', '...'] => {
+      'only_superuser' => false,
+      'permissions' => Permissions::READ,
+    },
+    ['status', 'pcsd', '...'] => {
+      'only_superuser' => false,
+      'permissions' => nil,
+    },
+  }
+  allowed = false
+  command_settings = {}
+  allowed_commands.each { |cmd, cmd_settings|
+    if command_decoded == cmd \
+      or \
+      (cmd[-1] == '...' and cmd[0..-2] == command_decoded[0..(cmd.length - 2)])
+      then
+        allowed = true
+        command_settings = cmd_settings
+        break
+    end
+  }
+  if !allowed
+    result = {
+      'status' => 'bad_command',
+      'data' => {},
+    }
+    return JSON.pretty_generate(result)
+  end
+
+  if command_settings['only_superuser']
+    if not allowed_for_superuser(session)
+      return 403, 'Permission denied'
+    end
+  end
+  if command_settings['permissions']
+    if not allowed_for_local_cluster(session, command_settings['permissions'])
+      return 403, 'Permission denied'
+    end
+  end
+
+  options = {}
+  options['stdin'] = std_in if std_in
+  std_out, std_err, retval = run_cmd_options(
+    session, options, PCS, *command_decoded
+  )
+  result = {
+    'status' => 'ok',
+    'data' => {
+      'stdout' => std_out.join(""),
+      'stderr' => std_err.join(""),
+      'code' => retval,
+    },
+  }
+  return JSON.pretty_generate(result)
 end
 
 if not DISABLE_GUI
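
The /run_pcs endpoint added above only executes whitelisted pcs commands. A whitelist entry ending in '...' matches any command that starts with the given words; entries without '...' match exactly and carry the local permission check, while many of the '...' variants ('permissions' => nil) defer the check to the remote node that actually runs the command. The matching expression behaves like this (illustration only; "rh7-1" stands in for any node name):

    def matches?(pattern, command)
      return true if pattern == command
      pattern[-1] == '...' and pattern[0..-2] == command[0..(pattern.length - 2)]
    end

    matches?(['cluster', 'stop'], ['cluster', 'stop'])                  # => true
    matches?(['cluster', 'stop', '...'], ['cluster', 'stop', 'rh7-1'])  # => true
    matches?(['cluster', 'stop', '...'], ['cluster', 'auth'])           # => false
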
@@ -202,8 +336,7 @@ if not DISABLE_GUI
   end
 
   post '/login' do
-    if PCSAuth.validUser(params['username'],params['password'])
-      session["username"] = params['username']
+    if PCSAuth.loginByPassword(session, params['username'], params['password'])
       # Temporarily ignore pre_login_path until we come up with a list of valid
       # paths to redirect to (to prevent status_all issues)
       #    if session["pre_login_path"]
@@ -211,7 +344,7 @@ if not DISABLE_GUI
       #      session.delete("pre_login_path")
       #      pp "Pre Login Path: " + plp
       #      if plp == "" or plp == "/"
-      #      	plp = '/manage'
+      #        plp = '/manage'
       #      end
       #      redirect plp
       #    else
@@ -223,135 +356,108 @@ if not DISABLE_GUI
     end
   end
 
-  post '/fencerm' do
-    params.each { |k,v|
-      if k.index("resid-") == 0
-        run_cmd(PCS, "resource", "delete", k.gsub("resid-",""))
-      end
-    }
-    redirect "/fencedevices/"
+  get '/manage/?' do
+    @manage = true
+    erb :manage, :layout => :main
   end
 
-  get '/configure/?:page?' do
-    @config_options = getConfigOptions(params[:page])
-    @configuremenuclass = "class=\"active\""
-    erb :configure, :layout => :main
+  get '/clusters_overview' do
+    clusters_overview(params, request, session)
   end
 
-  get '/fencedevices2/?:fencedevice?' do
-    @resources, @groups = getResourcesGroups(true)
-    pp @resources
-
-    if @resources.length == 0
-      @cur_resource = nil
-      @resource_agents = getFenceAgents()
-    else
-      @cur_resource = @resources[0]
-      if params[:fencedevice]
-        @resources.each do |fd|
-          if fd.id == params[:fencedevice]
-            @cur_resource = fd
-            break
-          end
-        end
-      end
-      @cur_resource.options = getResourceOptions(@cur_resource.id)
-      @resource_agents = getFenceAgents(@cur_resource.agentname)
-    end
-    erb :fencedevices, :layout => :main
+  get '/permissions/?' do
+    @manage = true
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    @clusters = pcs_config.clusters.sort { |a, b| a.name <=> b.name }
+    erb :permissions, :layout => :main
   end
 
-  ['/resources2/?:resource?', '/resource_list/?:resource?'].each do |path|
-    get path do
-      @load_data = true
-      @resources, @groups = getResourcesGroups
-      @resourcemenuclass = "class=\"active\""
+  get '/permissions_cluster_form/:cluster/?' do
+    @cluster_name = params[:cluster]
+    @error = nil
+    @permission_types = []
+    @permissions_dependencies = {}
+    @user_types = []
+    @users_permissions = []
+
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
 
-      if @resources.length == 0
-        @cur_resource = nil
-        @resource_agents = getResourceAgents()
+    if not pcs_config.is_cluster_name_in_use(@cluster_name)
+      @error = 'Cluster not found'
+    else
+      code, data = send_cluster_request_with_token(
+        session, @cluster_name, 'get_permissions'
+      )
+      if 404 == code
+        @error = 'Cluster is running an old version of pcsd which does not support permissions'
+      elsif 403 == code
+        @error = 'Permission denied'
+      elsif 200 != code
+        @error = 'Unable to load permissions of the cluster'
       else
-        @cur_resource = @resources[0]
-        @cur_resource.options = getResourceOptions(@cur_resource.id)
-        if params[:resource]
-          @resources.each do |r|
-            if r.id == params[:resource]
-              @cur_resource = r
-              @cur_resource.options = getResourceOptions(r.id)
-              break
-            end
+        begin
+          permissions = JSON.parse(data)
+          if permissions['notoken'] or permissions['noresponse']
+            @error = 'Unable to load permissions of the cluster'
+          else
+            @permission_types = permissions['permission_types'] || []
+            @permissions_dependencies = permissions['permissions_dependencies'] || {}
+            @user_types = permissions['user_types'] || []
+            @users_permissions = permissions['users_permissions'] || []
           end
+        rescue JSON::ParserError
+          @error = 'Unable to read permissions of the cluster'
         end
-        @resource_agents = getResourceAgents(@cur_resource.agentname)
-        @ord_dep_before, @ord_dep_after  = getOrderingConstraints(@cur_resource.id)
-        @colo_dep_together, @colo_dep_apart = getColocationConstraints(@cur_resource.id)
-        @enabled_nodes, @disabled_nodes = getLocationConstraints(@cur_resource.id)
-      end
-
-      @nodes_online, @nodes_offline = get_nodes
-
-      if path.start_with? '/resource_list'
-        erb :_resource_list
-      else
-        erb :resource, :layout => :main
       end
     end
+    erb :_permissions_cluster
   end
 
-  get '/nodes/?:node?' do
-    setup()
-    @load_data = true
-    #  @nodemenuclass = "class=\"active\""
-    @resources, @groups = getResourcesGroups
-    #  @resources_running = []
-    #  @resources.each { |r|
-    #    @cur_node && r.nodes && r.nodes.each {|n|
-    #      if n.name == @cur_node.id
-    #	@resources_running << r
-    #      end
-    #    }
-    #  }
-    @resource_agents = getResourceAgents()
-    @stonith_agents = getFenceAgents()
-    #  @nodes = @nodes.sort_by{|k,v|k}
-    erb :nodes, :layout => :main
-  end
-
-  get '/manage/?' do
-    @manage = true
-    pcs_config = PCSConfig.new
-    @clusters = pcs_config.clusters
-    @load_data = true
-    erb :manage, :layout => :main
+  post '/permissions_save/?' do
+    cluster_name = params['cluster_name']
+    params.delete('cluster_name')
+    new_params = {
+      'json_data' => JSON.generate(params)
+    }
+    return send_cluster_request_with_token(
+      session, cluster_name, "set_permissions", true, new_params
+    )
   end
 
   get '/managec/:cluster/main' do
     @cluster_name = params[:cluster]
-    #  @resources, @groups = getResourcesGroups
-    @load_data = true
-    pcs_config = PCSConfig.new
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
     @clusters = pcs_config.clusters
-    @resources = []
-    @groups = []
     @nodes = get_cluster_nodes(params[:cluster])
     if @nodes == []
       redirect '/manage/?error=badclustername&errorval=' + params[:cluster] + '#manage'
     end
-    @resource_agents = get_resource_agents_avail() 
-    @stonith_agents = get_stonith_agents_avail() 
-    @config_options = getConfigOptions2(@cluster_name)
+    @resource_agents = get_resource_agents_avail(session)
+    @stonith_agents = get_stonith_agents_avail(session)
+    @config_options = getConfigOptions2(session, @nodes)
 
     erb :nodes, :layout => :main
   end
 
   get '/managec/:cluster/status_all' do
-    status_all(params,get_cluster_nodes(params[:cluster]))
+    status_all(params, request, session, get_cluster_nodes(params[:cluster]))
+  end
+
+  get '/managec/:cluster/cluster_status' do
+    cluster_status_gui(session, params[:cluster])
+  end
+
+  get '/managec/:cluster/overview_cluster' do
+    overview_cluster(params, request, session)
   end
 
   get '/managec/:cluster/?*' do
     raw_data = request.env["rack.input"].read
     if params[:cluster]
-      send_cluster_request_with_token(params[:cluster], "/" + params[:splat].join("/"), false, params, true, raw_data)
+      send_cluster_request_with_token(
+        session, params[:cluster], "/" + params[:splat].join("/"), false, params,
+        true, raw_data
+      )
     end
   end
 
@@ -359,7 +465,9 @@ if not DISABLE_GUI
     raw_data = request.env["rack.input"].read
     if params[:cluster]
       request = "/" + params[:splat].join("/")
-      code, out = send_cluster_request_with_token(params[:cluster], request, true, params, true, raw_data)
+      code, out = send_cluster_request_with_token(
+        session, params[:cluster], request, true, params, true, raw_data
+      )
 
       # backward compatibility layer BEGIN
       # This code correctly remove constraints on pcs/pcsd version 0.9.137 and older
@@ -368,7 +476,10 @@ if not DISABLE_GUI
           "/remove_constraint_rule_remote" => "/resource_cmd/rm_constraint_rule"
       }
       if code == 404 and redirection.key?(request)
-        code, out = send_cluster_request_with_token(params[:cluster], redirection[request], true, params, false, raw_data)
+        code, out = send_cluster_request_with_token(
+          session, params[:cluster], redirection[request], true, params, false,
+          raw_data
+        )
       end
       # bcl END
       return code, out
@@ -377,15 +488,26 @@ if not DISABLE_GUI
 
   get '/manage/:node/?*' do
     if params[:node]
-      return send_request_with_token(params[:node], params[:splat].join("/"), false, {}, false)
+      return send_request_with_token(
+        session, params[:node], params[:splat].join("/"), false, {}, false
+      )
     end
   end
 
   post '/manage/existingcluster' do
-    pcs_config = PCSConfig.new
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
     node = params['node-name']
-    code, result = send_request_with_token(node, 'status')
-    status = JSON.parse(result)
+    code, result = send_request_with_token(
+      PCSAuth.getSuperuserSession(), node, 'status'
+    )
+    begin
+      status = JSON.parse(result)
+    rescue JSON::ParserError
+      session[:error] = "genericerror"
+      session[:errorval] = 'Unable to communicate with remote pcsd.'
+      redirect '/manage'
+    end
+
     if status.has_key?("corosync_offline") and
       status.has_key?("corosync_online") then
       nodes = status["corosync_offline"] + status["corosync_online"]
@@ -402,8 +524,48 @@ if not DISABLE_GUI
         redirect '/manage'
       end
 
+      # auth begin
+      retval, out = send_request_with_token(
+        PCSAuth.getSuperuserSession(), node, '/get_cluster_tokens'
+      )
+      if retval == 404 # backward compatibility layer
+        session[:error] = "authimposible"
+      else
+        if retval != 200
+          session[:error] = "cannotgettokens"
+          session[:errorval] = status["cluster_name"]
+          redirect '/manage'
+        end
+        begin
+          new_tokens = JSON.parse(out)
+        rescue
+          session[:error] = "cannotgettokens"
+          session[:errorval] = status["cluster_name"]
+          redirect '/manage'
+        end
+
+        sync_config = Cfgsync::PcsdTokens.from_file('')
+        pushed, _ = Cfgsync::save_sync_new_tokens(
+          sync_config, new_tokens, get_corosync_nodes(), $cluster_name
+        )
+        if not pushed
+          session[:error] = "configversionsconflict"
+          session[:errorval] = sync_config.class.name
+          redirect '/manage'
+        end
+      end
+      #auth end
+
       pcs_config.clusters << Cluster.new(status["cluster_name"], nodes)
-      pcs_config.save
+
+      sync_config = Cfgsync::PcsdSettings.from_text(pcs_config.text())
+      pushed, _ = Cfgsync::save_sync_new_version(
+        sync_config, get_corosync_nodes(), $cluster_name, true
+      )
+      if not pushed
+        session[:error] = 'configversionsconflict'
+        session[:errorval] = sync_config.class.name
+      end
       redirect '/manage'
     else
       redirect '/manage/?error=notauthorized#manage'
@@ -411,15 +573,22 @@ if not DISABLE_GUI
   end
 
   post '/manage/newcluster' do
-    pcs_config = PCSConfig.new
+    if not allowed_for_superuser(session)
+      session[:error] = "permissiondenied"
+      redirect '/manage'
+    end
+
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
     @manage = true
     @cluster_name = params[:clustername]
     @nodes = []
+    nodes_with_indexes = []
     @nodes_rrp = []
     options = {}
     params.each {|k,v|
       if k.start_with?("node-") and v != ""
         @nodes << v
+        nodes_with_indexes << [k[5..-1].to_i, v]
         if params.has_key?("ring1-" + k) and params["ring1-" + k] != ""
           @nodes_rrp << v + "," + params["ring1-" + k]
         else
@@ -444,12 +613,63 @@ if not DISABLE_GUI
       end
     }
 
-    $logger.info("Sending setup cluster request for: " + @cluster_name + " to: " + @nodes[0])
-    code,out = send_request_with_token(@nodes[0], "setup_cluster", true, {:clustername => @cluster_name, :nodes => @nodes_rrp.join(';'), :options => options.to_json}, true, nil, 60)
+    # first we need to authenticate nodes to each other
+    tokens = add_prefix_to_keys(get_tokens_of_nodes(@nodes), "node:")
+    @nodes.each {|n|
+      retval, out = send_request_with_token(
+        session, n, "/save_tokens", true, tokens
+      )
+      if retval == 404 # backward compatibility layer
+        session[:error] = "authimposible"
+        break
+      elsif retval != 200
+        session[:error] = "cannotsavetokens"
+        session[:errorval] = n
+        redirect '/manage'
+      end
+    }
+
+    # the first node from the form is the source of config files
+    node_to_send_to = nodes_with_indexes.sort[0][1]
+    $logger.info(
+      "Sending setup cluster request for: #{@cluster_name} to: #{node_to_send_to}"
+    )
+    code,out = send_request_with_token(
+      session,
+      node_to_send_to,
+      'setup_cluster',
+      true,
+      {
+        :clustername => @cluster_name,
+        :nodes => @nodes_rrp.join(';'),
+        :options => options.to_json
+      },
+      true,
+      nil,
+      60
+    )
 
     if code == 200
-      pcs_config.clusters << Cluster.new(@cluster_name, @nodes)
-      pcs_config.save
+      pushed = false
+      2.times {
+        # Add the new cluster to config and publish the config.
+        # If this host is a node of the cluster, some other node may send its
+        # own PcsdSettings.  To handle it we just need to reload the config, as
+        # we are waiting for the request to finish, so no locking is needed.
+        # If we are in a different cluster we just try twice to update the
+        # config, dealing with any updates in between.
+        pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+        pcs_config.clusters << Cluster.new(@cluster_name, @nodes)
+        sync_config = Cfgsync::PcsdSettings.from_text(pcs_config.text())
+        pushed, _ = Cfgsync::save_sync_new_version(
+          sync_config, get_corosync_nodes(), $cluster_name, true
+        )
+        break if pushed
+      }
+      if not pushed
+        session[:error] = 'configversionsconflict'
+        session[:errorval] = Cfgsync::PcsdSettings.name
+      end
     else
       session[:error] = "unabletocreate"
       session[:errorval] = out
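
In the newcluster handler above, the setup_cluster request is sent to the node the user entered first in the form: nodes_with_indexes collects [index, name] pairs from the node-N fields and the lowest index wins. For instance, with hypothetical form values:

    # params: 'node-0' => 'rh7-2', 'node-3' => 'rh7-1'
    nodes_with_indexes              # => [[0, 'rh7-2'], [3, 'rh7-1']]
    nodes_with_indexes.sort[0][1]   # => 'rh7-2', the source of the config files

The config publish that follows is attempted twice (the 2.times loop) so a version conflict caused by a concurrent update can be resolved by simply reloading and retrying.
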
@@ -459,14 +679,21 @@ if not DISABLE_GUI
   end
 
   post '/manage/removecluster' do
-    pcs_config = PCSConfig.new
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
     params.each { |k,v|
       if k.start_with?("clusterid-")
         pcs_config.remove_cluster(k.sub("clusterid-",""))
       end
     }
-    pcs_config.save
-    redirect '/manage'
+    sync_config = Cfgsync::PcsdSettings.from_text(pcs_config.text())
+    pushed, _ = Cfgsync::save_sync_new_version(
+      sync_config, get_corosync_nodes(), $cluster_name, true
+    )
+    if not pushed
+      session[:error] = 'configversionsconflict'
+      session[:errorval] = sync_config.class.name
+    end
+    # do not reload nor redirect as that's done in js which called this
   end
 
   get '/' do
@@ -500,34 +727,7 @@ else
 
 end
 
-def getLocationDeps(cur_node)
-  out, stderror, retval = run_cmd(PCS, "constraint", "location", "show", "nodes", cur_node.id)
-  deps_allow = []
-  deps_disallow = []
-  allowed = false
-  disallowed = false
-  out.each {|line|
-    line = line.strip
-    next if line == "Location Constraints:" or line.match(/^Node:/)
-
-    if line == "Allowed to run:"
-      allowed = true
-      next
-    elsif line == "Not allowed to run:"
-      disallowed = true
-      next
-    end
-
-    if disallowed == true
-      deps_disallow << line.sub(/ .*/,"")
-    elsif allowed == true
-      deps_allow << line.sub(/ .*/,"")
-    end
-  }  
-  [deps_allow, deps_disallow]
-end
-
-def getConfigOptions2(cluster_name)
+def getConfigOptions2(session, cluster_nodes)
   config_options = {}
   general_page = []
 #  general_page << ConfigOption.new("Cluster Delay Time", "cluster-delay",  "int", 4, "Seconds") 
@@ -565,54 +765,7 @@ If checked, the cluster will refuse to start resources unless one or more STONIT
   allconfigoptions = []
   config_options.each { |i,k| k.each { |j| allconfigoptions << j } }
   ConfigOption.getDefaultValues(allconfigoptions)
-  ConfigOption.loadValues(allconfigoptions,cluster_name)
-  return config_options
-end
-
-def getConfigOptions(page="general")
-  config_options = []
-  case page
-  when "general", nil
-    cg1 = []
-    cg1 << ConfigOption.new("Cluster Delay Time", "cdt",  "int", 4, "Seconds") 
-    cg1 << ConfigOption.new("Batch Limit", "cdt",  "int", 4) 
-    cg1 << ConfigOption.new("Default Action Timeout", "cdt",  "int", 4, "Seconds") 
-    cg2 = []
-    cg2 << ConfigOption.new("During timeout should cluster stop all active resources", "res_stop", "radio", "4", "", ["Yes","No"])
-
-    cg3 = []
-    cg3 << ConfigOption.new("PE Error Storage", "res_stop", "radio", "4", "", ["Yes","No"])
-    cg3 << ConfigOption.new("PE Warning Storage", "res_stop", "radio", "4", "", ["Yes","No"])
-    cg3 << ConfigOption.new("PE Input Storage", "res_stop", "radio", "4", "", ["Yes","No"])
-
-    config_options << cg1
-    config_options << cg2
-    config_options << cg3
-  when "pacemaker"
-    cg1 = []
-    cg1 << ConfigOption.new("Batch Limit", "batch-limit",  "int", 4, "jobs") 
-    cg1 << ConfigOption.new("No Quorum Policy", "no-quorum-policy",  "dropdown","" ,"", {"ignore" => "Ignore","freeze" => "Freeze", "stop" => "Stop", "suicide" => "Suicide"}) 
-    cg1 << ConfigOption.new("Symmetric", "symmetric-cluster", "check")
-    cg2 = []
-    cg2 << ConfigOption.new("Stonith Enabled", "stonith-enabled", "check")
-    cg2 << ConfigOption.new("Stonith Action", "stonith-action",  "dropdown","" ,"", {"reboot" => "Reboot","poweroff" => "Poweroff"}) 
-    cg3 = []
-    cg3 << ConfigOption.new("Cluster Delay", "cluster-delay",  "int", 4) 
-    cg3 << ConfigOption.new("Stop Orphan Resources", "stop-orphan-resources", "check")
-    cg3 << ConfigOption.new("Stop Orphan Actions", "stop-orphan-actions", "check")
-    cg3 << ConfigOption.new("Start Failure is Fatal", "start-failure-is-fatal", "check")
-    cg3 << ConfigOption.new("PE Error Storage", "pe-error-series-max", "int", "4")
-    cg3 << ConfigOption.new("PE Warning Storage", "pe-warn-series-max", "int", "4")
-    cg3 << ConfigOption.new("PE Input Storage", "pe-input-series-max", "int", "4")
-
-    config_options << cg1
-    config_options << cg2
-    config_options << cg3
-  end
-
-  allconfigoptions = []
-  config_options.each { |i| i.each { |j| allconfigoptions << j } }
-  ConfigOption.getDefaultValues(allconfigoptions)
+  ConfigOption.loadValues(session, allconfigoptions, cluster_nodes)
   return config_options
 end
 
@@ -637,8 +790,8 @@ class ConfigOption
     @desc = desc
   end
 
-  def self.loadValues(cos,cluster_name)
-    code,output = send_cluster_request_with_token(cluster_name, "get_cib")
+  def self.loadValues(session, cos, node_list)
+    code, output = send_nodes_request_with_token(session, node_list, "get_cib")
     $logger.info(code)
     if code != 200
       $logger.info "Error: unable to load cib"
@@ -651,24 +804,29 @@ class ConfigOption
     cos.each {|co|
       prop_found = false
       doc.elements.each("cib/configuration/crm_config/cluster_property_set/nvpair[@name='#{co.configname}']") { |e|
-      	co.value = e.attributes["value"]
-      	prop_found = true
+        co.value = e.attributes["value"]
+        prop_found = true
       }
       if prop_found == false
-      	co.value = co.default
+        co.value = co.default
       end
     }
   end
 
   def self.getDefaultValues(cos)
-    metadata = `#{PENGINE} metadata`
-    doc = REXML::Document.new(metadata)
-
-    cos.each { |co|
-      doc.elements.each("resource-agent/parameters/parameter[@name='#{co.configname}']/content") { |e|
-	co.default = e.attributes["default"]
-	break
-      }
+    [PENGINE, CIB_BINARY].each { |command|
+      metadata = `#{command} metadata`
+      begin
+        doc = REXML::Document.new(metadata)
+        cos.each { |co|
+          doc.elements.each("resource-agent/parameters/parameter[@name='#{co.configname}']/content") { |e|
+            co.default = e.attributes["default"]
+            break
+          }
+        }
+      rescue
+        $logger.error("Failed to parse #{command} metadata")
+      end
     }
   end
 
@@ -677,23 +835,23 @@ class ConfigOption
     when "radio"
       val = value
       if option == "Yes"
-	if val == "true"
-	  return "checked"
-	end
+        if val == "true"
+          return "checked"
+        end
       else
-	if val == "false"
-	  return "checked"
-	end
+        if val == "false"
+          return "checked"
+        end
       end
     when "check"
       if value == "true" || value == "on"
-	return "checked"
+        return "checked"
       else
-	return ""
+        return ""
       end
     when "dropdown"
       if value == option
-	return "selected"
+        return "selected"
       end
     end
   end
@@ -709,7 +867,7 @@ class ConfigOption
     when "radio"
       ret = ""
       options.each {|option|
-	ret += "<input type=radio #{checked(option)} name=\"#{paramname}\" value=\"#{option}\">#{option}"
+        ret += "<input type=radio #{checked(option)} name=\"#{paramname}\" value=\"#{option}\">#{option}"
       }
       return ret
     when "check"
@@ -719,9 +877,9 @@ class ConfigOption
     when "dropdown"
       ret = "<select name=\"#{paramname}\">"
       options.each {|key, option|
-	ret += "<option #{checked(key)} value=\"#{key}\">#{option}</option>"
+        ret += "<option #{checked(key)} value=\"#{key}\">#{option}</option>"
       }
-      ret += "<select"
+      ret += "</select>"
       return ret
     end
   end
diff --git a/pcsd/pcsd.service b/pcsd/pcsd.service
index 5254905..075a3a6 100644
--- a/pcsd/pcsd.service
+++ b/pcsd/pcsd.service
@@ -2,11 +2,9 @@
 Description=PCS GUI and remote configuration interface
 
 [Service]
-EnvironmentFile=-/etc/sysconfig/pcsd
-WorkingDirectory=/var/lib/pcsd
-ExecStart=/usr/lib/pcsd/pcsd start
-Type=notify
-NotifyAccess=all
+EnvironmentFile=/etc/sysconfig/pcsd
+Environment=GEM_HOME=/usr/lib/pcsd/vendor/bundle/ruby
+ExecStart=/usr/bin/ruby -C/var/lib/pcsd -I/usr/lib/pcsd -- /usr/lib/pcsd/ssl.rb > /dev/null &
 
 [Install]
 WantedBy=multi-user.target
diff --git a/pcsd/pcsd.service.debian b/pcsd/pcsd.service.debian
new file mode 100644
index 0000000..0892773
--- /dev/null
+++ b/pcsd/pcsd.service.debian
@@ -0,0 +1,9 @@
+[Unit]
+Description=PCS GUI and remote configuration interface
+
+[Service]
+EnvironmentFile=/etc/default/pcsd
+ExecStart=/usr/bin/ruby -C/var/lib/pcsd -I/usr/share/pcsd -- /usr/share/pcsd/ssl.rb & > /dev/null &
+
+[Install]
+WantedBy=multi-user.target
diff --git a/pcsd/permissions.rb b/pcsd/permissions.rb
new file mode 100644
index 0000000..7601865
--- /dev/null
+++ b/pcsd/permissions.rb
@@ -0,0 +1,180 @@
+require 'orderedhash'
+
+module Permissions
+
+  TYPE_USER = 'user'
+  TYPE_GROUP = 'group'
+
+  READ = 'read'
+  WRITE = 'write'
+  GRANT = 'grant'
+  FULL = 'full'
+
+  def self.get_user_types()
+    return [
+      {
+        'code' => TYPE_USER,
+        'label' => 'User',
+        'description' => '',
+      },
+      {
+        'code' => TYPE_GROUP,
+        'label' => 'Group',
+        'description' => '',
+      }
+    ]
+  end
+
+  def self.get_permission_types()
+    return [
+      {
+        'code' => READ,
+        'label' => 'Read',
+        'description' => 'Allows to view cluster settings',
+      },
+      {
+        'code' => WRITE,
+        'label' => 'Write',
+        'description' => 'Allows to modify cluster settings except permissions and ACLs',
+      },
+      {
+        'code' => GRANT,
+        'label' => 'Grant',
+        'description' => 'Allows to modify cluster permissions and ACLs',
+      },
+      {
+        'code' => FULL,
+        'label' => 'Full',
+        'description' => ('Allows unrestricted access to a cluster including '\
+          + 'adding and removing nodes and access to keys and certificates'),
+      }
+    ]
+  end
+
+  def self.is_user_type(type)
+    return [TYPE_USER, TYPE_GROUP].include?(type)
+  end
+
+  def self.is_permission_type(permission)
+    return [READ, WRITE, GRANT, FULL].include?(permission)
+  end
+
+  def self.permissions_dependencies()
+    return {
+      'also_allows' => {
+        WRITE => [READ],
+        FULL => [READ, WRITE, GRANT],
+      },
+    }
+  end
+
+  class EntityPermissions
+    attr_reader :type, :name, :allow_list
+
+    def initialize(type, name, allow_list)
+      # possibility to add deny_list
+      @type = type
+      @name = name
+      @allow_list = allow_list.uniq
+    end
+
+    def applies_to(type, name)
+      return (type == @type and name == @name)
+    end
+
+    def allows?(action)
+      # - possibility to extend to more elaborate evaluation
+      #   e.g. "read" allows both "read_nodes" and "read_resources"
+      # - possibility to add deny_list
+      if @allow_list.include?(action)
+        return true
+      else
+        deps = Permissions.permissions_dependencies()
+        deps['also_allows'].each { |new_action, also_allows|
+          if also_allows.include?(action) and @allow_list.include?(new_action)
+            return true
+          end
+        }
+      end
+      return false
+    end
+
+    def merge!(other)
+      @allow_list = (@allow_list + other.allow_list).uniq
+    end
+
+    def to_hash()
+      perm_hash = OrderedHash.new
+      perm_hash['type'] = @type
+      perm_hash['name'] = @name
+      perm_hash['allow'] = @allow_list.uniq.sort
+      return perm_hash
+    end
+  end
+
+  class PermissionsSet
+    def initialize(entity_permissions_list)
+      @permissions = {
+        TYPE_USER => {},
+        TYPE_GROUP => {},
+      }
+      entity_permissions_list.each{ |perm|
+        if not @permissions.key?(perm.type)
+          @permissions[perm.type] = {}
+        end
+        if @permissions[perm.type][perm.name]
+          @permissions[perm.type][perm.name].merge!(perm)
+        else
+          @permissions[perm.type][perm.name] = perm
+        end
+      }
+    end
+
+    def entity_permissions_list()
+      return @permissions.values.collect { |perm| perm.values }.flatten
+    end
+
+    def to_hash()
+      perm_set = []
+      entity_permissions_list.each { |perm|
+        perm_set << perm.to_hash()
+      }
+      return perm_set.sort { |a, b|
+        a['type'] == b['type'] ? a['name'] <=> b['name'] : a['type'] <=> b['type']
+      }
+    end
+
+    def allows?(username, groups, action)
+      $logger.debug(
+        "permission check action=#{action} username=#{username} groups=#{groups.join(' ')}"
+      )
+
+      if ::SUPERUSER == username
+        $logger.debug('permission granted for superuser')
+        return true
+      end
+
+      if @permissions[TYPE_USER].key?(username)
+        if @permissions[TYPE_USER][username].allows?(action)
+          $logger.debug("permission granted for user #{username}")
+          return true
+        end
+      end
+
+      groups.each { |group|
+        if (
+          @permissions[TYPE_GROUP].key?(group)\
+          and\
+          @permissions[TYPE_GROUP][group].allows?(action)
+        )
+          $logger.debug("permission granted for group #{group}")
+          return true
+        end
+      }
+
+      $logger.debug('permission denied')
+      return false
+    end
+  end
+
+end
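
permissions.rb models per-user and per-group cluster permissions; permissions_dependencies() makes WRITE implicitly grant READ and FULL grant everything. A quick illustration using only the classes above (PermissionsSet#allows? would additionally need $logger to be set up):

    perm = Permissions::EntityPermissions.new(
      Permissions::TYPE_USER, 'alice', [Permissions::WRITE]
    )
    perm.allows?(Permissions::WRITE)  # => true
    perm.allows?(Permissions::READ)   # => true  (WRITE also allows READ)
    perm.allows?(Permissions::FULL)   # => false
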
diff --git a/pcsd/public/css/style.css b/pcsd/public/css/style.css
index 0a62910..1c003bd 100644
--- a/pcsd/public/css/style.css
+++ b/pcsd/public/css/style.css
@@ -55,7 +55,6 @@ table {
 #header #logo {
   padding-top: 20px;
   padding-right: 15px;
-  float: left;
 }
 
 td.right {
@@ -623,6 +622,10 @@ tr td a.remove {
 .xdark { background-position: -32px -17px; }
 .plus { background-position: -48px -1px; }
 .check { background-position: -448px -1px; }
+.warning { background-position: -432px -1px; }
+.warningdark { background-position: -432px -17px; }
+.error { background-position: -384px -1px; }
+.errordark { background-position: -384px -17px; }
 .checkdark { background-position: -448px -17px; }
 .restart { background-position: -272px -17px; }
 .standby { background-position: -352px -16px; }
@@ -723,7 +726,7 @@ li.menuheader {
 }
 
 .node_selected {
-  background: #e4f5fd;
+  background-color: #e4f5fd;
 }
 
 .ui-progressbar .ui-progressbar-value {
@@ -751,3 +754,35 @@ li.menuheader {
 #new_stonith_agent .long_desc_div {
   max-width: 350px !important;
 }
+
+.hidden {
+    display: none;
+}
+
+.element_name {
+  height: 25px;
+}
+.element_name div div {
+  margin-top: 3px;
+  height: 100%;
+}
+
+.tree-view {
+  width: 450px;
+}
+
+.mouse_on_row {
+  background-color: #e4f5fd;
+}
+
+.issue_table {
+  margin-top: 1.5em;
+}
+
+.status-error {
+  color: red;
+}
+
+.status-warning {
+  color: #ff6600;
+}
diff --git a/pcsd/public/js/nodes-ember.js b/pcsd/public/js/nodes-ember.js
index 0871f68..c3e8e80 100644
--- a/pcsd/public/js/nodes-ember.js
+++ b/pcsd/public/js/nodes-ember.js
@@ -30,105 +30,931 @@ Pcs = Ember.Application.createWithMixins({
     if (this.cur_page == "manage") return "display: table-row;";
     else return "display: none;";
   }.property("cur_page"),
+  permissions_page: function() {
+    if (this.cur_page == "permissions") return "display: table-row;";
+    else return "display: none;";
+  }.property("cur_page"),
   wizards_page: function() {
     if (this.cur_page == "wizards") return "display: table-row;";
     else return "display: none;";
   }.property("cur_page"),
 
   getResourcesFromID: function(resources) {
-    retArray = [];
-    for (var i=0; i < resources.length; i++) {
-      $.each(this.resourcesController.content, function(ind,v) { 
-	if (v.name == resources[i] && v.stonith == false) {
-      	  retArray.push(v);
-	}
-      });
-    }
+    var retArray = [];
+    var resource_map = Pcs.resourcesContainer.get('resource_map');
+    $.each(resources, function(_, resource_id) {
+      if (resource_id in resource_map && !resource_map[resource_id].get('stonith')) {
+        retArray.pushObject(resource_map[resource_id]);
+      }
+    });
     return retArray;
   },
-  update_timeout: null,
-  update: function(first_run) {
-    if (first_run)
+  updater: null,
+
+  update: function() {
+    Pcs.get('updater').update();
+  },
+
+  _update: function(first_run) {
+    if (window.location.pathname.lastIndexOf('/manage', 0) !== 0) {
+      return;
+    }
+    if (first_run) {
       show_loading_screen();
-    if (this.cluster_name == null) {
+    }
+    var self = Pcs;
+    var cluster_name = self.cluster_name;
+    if (cluster_name == null) {
+      if (location.pathname.indexOf("/manage") != 0) {
+        return;
+      }
       Ember.debug("Empty Cluster Name");
-      hide_loading_screen();
+      $.ajax({
+        url: "/clusters_overview",
+        dataType: "json",
+        timeout: 20000,
+        success: function(data) {
+          Pcs.clusterController.update(data);
+          if (Pcs.clusterController.get('cur_cluster')) {
+            Pcs.clusterController.update_cur_cluster(Pcs.clusterController.get('cur_cluster').get('name'));
+          }
+          if (data["not_current_data"]) {
+            self.update();
+          }
+          hide_loading_screen();
+        },
+        error: function(jqhxr,b,c) {
+          if (jqhxr.responseText) {
+            try {
+              var obj = $.parseJSON(jqhxr.responseText);
+              if (obj.notauthorized == "true") {
+                location.reload();
+              }
+            } catch(e) {
+              console.log("Error: Unable to parse json for clusters_overview");
+            }
+          }
+          hide_loading_screen();
+        },
+        complete: function() {
+          Pcs.get('updater').update_finished();
+        }
+      });
       return;
     }
     $.ajax({
-      url:  "status_all",
-//      url: "/test_status.json",
+      url: "cluster_status",
       dataType: "json",
       success: function(data) {
-	Pcs.resourcesController.update(data);
-	Pcs.nodesController.update(data);
-	Pcs.settingsController.update(data);
-	Pcs.aclsController.update(data);
-	Pcs.set("cluster_settings",data[Object.keys(data)[0]].cluster_settings);
-        Pcs.set('need_ring1_address', false)
-        Pcs.set('is_cman_with_udpu_transport', false)
-        $.each(data, function(node, node_data) {
-          $.each(node_data, function(key, value) {
-            if (key == 'need_ring1_address' && value) {
-              Pcs.set('need_ring1_address', true);
+        Pcs.resourcesContainer.update(data);
+        Pcs.nodesController.update(data);
+        Pcs.settingsController.update(data);
+        Pcs.aclsController.update(data);
+        Pcs.set("cluster_settings",data.cluster_settings);
+        Pcs.set('need_ring1_address', false);
+        Pcs.set('is_cman_with_udpu_transport', false);
+        if (data['need_ring1_address']) {
+          Pcs.set('need_ring1_address', true);
+        }
+        if (data['is_cman_with_udpu_transport']) {
+          Pcs.set('is_cman_with_udpu_transport', true);
+        }
+        var fence_change = false;
+        var resource_change = false;
+        Ember.run.next(function () {
+          var self = Pcs.resourcesContainer;
+          var cur_fence = self.get('cur_fence');
+          var cur_resource = self.get('cur_resource');
+          var resource_map = self.get('resource_map');
+          if (first_run) {
+            setup_node_links();
+            Pcs.nodesController.load_node($('#node_list_row').find('.node_selected').first(),true);
+            Pcs.aclsController.load_role($('#acls_list_row').find('.node_selected').first(), true);
+            if (self.get("fence_id_to_load")) {
+              cur_fence = self.get_resource_by_id(self.get("fence_id_to_load"));
+              fence_change = true;
+            }
+            if (self.get("resource_id_to_load")) {
+              cur_resource = self.get_resource_by_id(self.get("resource_id_to_load"));
+              resource_change = true;
+            }
+          }
+
+          if (cur_fence && cur_fence.get('id') in resource_map) {
+            if (resource_map[cur_fence.get('id')] !== cur_fence) {
+              cur_fence = resource_map[cur_fence.get('id')];
+            }
+          } else {
+            if (self.get('fence_list').length > 0) {
+              cur_fence = self.get('fence_list')[0];
+            } else {
+              cur_fence = null;
+            }
+            fence_change = true;
+          }
+
+          if (cur_resource && cur_resource.get('id') in resource_map) {
+            if (resource_map[cur_resource.get('id')] !== cur_resource) {
+              cur_resource = resource_map[cur_resource.get('id')];
+            }
+          } else {
+            if (self.get('resource_list').length > 0) {
+              cur_resource = self.get('resource_list')[0];
+            } else {
+              cur_resource = null;
             }
-            if (key == 'is_cman_with_udpu_transport' && value) {
-              Pcs.set('is_cman_with_udpu_transport', true);
+            resource_change = true;
+          }
+
+          self.set('cur_fence', cur_fence);
+          self.set('cur_resource', cur_resource);
+
+          Ember.run.scheduleOnce('afterRender', Pcs, function () {
+            if (self.get('cur_fence')) {
+              if (fence_change)
+                tree_view_onclick(self.get('cur_fence').get('id'), true);
+              else
+                tree_view_select(self.get('cur_fence').get('id'));
             }
+            if (self.get('cur_resource')) {
+              if (resource_change)
+                tree_view_onclick(self.get('cur_resource').get('id'), true);
+              else
+                tree_view_select(self.get('cur_resource').get('id'));
+            }
+            Pcs.selectedNodeController.reset();
+            disable_checkbox_clicks();
           });
         });
-	Ember.run.next(this,disable_checkbox_clicks);
-	if (first_run) {
-	    Ember.run.next(this,function () {
-	      Pcs.resourcesController.load_resource($('#resource_list_row').find('.node_selected').first(),true);
-	      Pcs.resourcesController.load_stonith($('#stonith_list_row').find('.node_selected').first(),true);
-	      Pcs.nodesController.load_node($('#node_list_row').find('.node_selected').first(),true);
-        Pcs.aclsController.load_role($('#acls_list_row').find('.node_selected').first(), true);
-	    });
-	    Pcs.selectedNodeController.reset();
-	    setup_node_links();
-	    setup_resource_links();
-	} 
-	hide_loading_screen();
-	clearTimeout(Pcs.update_timeout);
-	Pcs.update_timeout = window.setTimeout(Pcs.update,20000);
       },
       error: function(jqhxr,b,c) {
-	if (jqhxr.responseText) {
-	  try {
-	    var obj = $.parseJSON(jqhxr.responseText);
-	    if (obj.notauthorized == "true") {
-	      location.reload();
-	    }
-	  } catch(e) {
-	    console.log("Error: Unable to parse json for status_all")
-	  }
-	}
-	hide_loading_screen();
+        try {
+          var obj = $.parseJSON(jqhxr.responseText);
+          if (obj.notauthorized == "true") {
+            location.reload();
+          }
+        } catch(e) {
+          console.log("Error: Unable to parse json for cluster_status")
+        }
+      },
+      complete: function() {
+        hide_loading_screen();
+        Pcs.get('updater').update_finished();
+      }
+    });
+  }
+});
+
+Pcs.UtilizationTableComponent = Ember.Component.extend({
+  entity: null,
+  type: "node", // node or resource
+  form_id: Ember.computed("type", function() {
+    return "new_" + this.get("type") + "_utilization";
+  }),
+  show_content: false,
+  utilization: [],
+  last_count: 0,
+  util_count: function() {
+    var l = 0;
+    if (this.utilization) {
+      l = this.utilization.length;
+    }
+    // this is needed to avoid showing/hiding the table on each update
+    if (this.last_count != l) {
+      if (l > 0) {
+        this.set('show_content', true);
+      } else {
+        this.set('show_content', false);
+      }
+    }
+    this.set("last_count", l);
+    return l;
+  }.property("utilization"),
+  actions: {
+    toggleBody: function() {
+      this.toggleProperty('show_content');
+    },
+    remove: function(name) {
+      set_utilization(this.type, this.entity.get("id"), name, "");
+    },
+    add: function(form_id) {
+      var id = "#" + form_id;
+      var name = $(id + " input[name='new_utilization_name']").val();
+      if (name == "") {
+        return;
+      }
+      var value = $(id + " input[name='new_utilization_value']").val().trim();
+      if (!is_integer(value)) {
+        alert("Value of utilization attribute has to be integer.");
+        return;
+      }
+      set_utilization(
+        this.type,
+        this.entity.get("id"),
+        name,
+        value
+      );
+      fade_in_out($(id));
+      $(id + " input").val("");
+    }
+  }
+});
+
+Pcs.Updater = Ember.Object.extend({
+  timeout: 20000,
+  first_run: true,
+  async: true,
+  autostart: true,
+  started: false,
+  in_progress: false,
+  waiting: false,
+  update_function: null,
+  update_target: null,
+  timer: null,
+
+  start: function() {
+    this.set('started', true);
+    this.update();
+  },
+
+  stop: function() {
+    this.set('started', false);
+    this.cancel_timer();
+  },
+
+  cancel_timer: function() {
+    var self = this;
+    var timer = self.get('timer');
+    if (timer) {
+      self.set('timer', null);
+      Ember.run.cancel(timer);
+    }
+  },
+
+  update: function() {
+    var self = this;
+    if (!self.get('update_function')) {
+      console.log('No update_function defined!');
+      return;
+    }
+    self.cancel_timer();
+    self.set('waiting', false);
+    if (self.get('in_progress')) {
+      self.set('waiting', true);
+    } else {
+      self.set('in_progress', true);
+      self.get('update_function').apply(self.get('update_target'), [self.get('first_run')]);
+      self.set('first_run', false);
+      if (!self.get('async')) {
+        self.update_finished();
+      }
+    }
+  },
+
+  update_finished: function() {
+    var self = this;
+    if (self.get('waiting')) {
+      Ember.run.next(self, self.update);
+    } else if (self.get('started')) {
+      self.set('timer', Ember.run.later(self, self.update, self.get('timeout')));
+    }
+    self.set('in_progress', false);
+  },
+
+  init: function() {
+    var self = this;
+    if (!self.get('update_target')) {
+      self.set('update_target', self);
+    }
+    if (self.get('autostart')) {
+      self.start();
+    }
+  }
+});
+
+Pcs.resourcesContainer = Ember.Object.create({
+  resource_map: {},
+  top_level_resource_map: {},
+  fence_list: [],
+  resource_list: [],
+  resource_id_to_load: null,
+  fence_id_to_load: null,
+  cur_resource: null,
+  cur_fence: null,
+  constraints: {},
+  group_list: [],
+  data_version: null,
+
+  get_resource_by_id: function(resource_id) {
+    var resource_map = this.get('resource_map');
+    if (resource_id in resource_map)
+      return resource_map[resource_id];
+    return null;
+  },
+
+  get_family_list: function(parent) {
+    var family = [];
+    family.push(parent);
+    switch (parent["class_type"]) {
+      case "group":
+        $.each(parent.get('members'), function(index, member) {
+          family = family.concat(Pcs.resourcesContainer.get_family_list(member));
+        });
+        break;
+      case "clone":
+      case "master":
+        family = family.concat(Pcs.resourcesContainer.get_family_list(parent.get('member')));
+        break;
+    }
+    return family;
+  },
+
+  get_constraints: function(cons) {
+    var ord_con = {};
+    var loc_con = {};
+    var col_con = {};
+    var ord_set_con = {};
+    var res_loc_constraints = {};
+    var res_ord_constraints = {};
+    var res_ord_set_constraints = {};
+    var res_col_constraints = {};
+    if (cons) {
+      if (cons["rsc_location"]) {
+        $.each(cons["rsc_location"], function (key, value) {
+          loc_con[value["id"]] = value;
+        });
+      }
+      if (cons["rsc_order"]) {
+        $.each(cons["rsc_order"], function (key, value) {
+          if (value["sets"]) {
+            ord_set_con[value["id"]] = value;
+          }
+          else {
+            ord_con[value["id"]] = value;
+          }
+        });
+      }
+      if (cons["rsc_colocation"]) {
+        $.each(cons["rsc_colocation"], function (key, value) {
+          col_con[value["id"]] = value;
+        });
+      }
+    }
+
+    $.each(loc_con, function (key, value) {
+      res_loc_constraints[value["rsc"]] = res_loc_constraints[value["rsc"]] || [];
+      res_loc_constraints[value["rsc"]].push(value);
+    });
+    $.each(ord_con, function (key, value) {
+      first = $.extend({"other_rsc":value["then"],"before":false}, value);
+      if (value["first"] in res_ord_constraints)
+        res_ord_constraints[value["first"]].push(first);
+      else res_ord_constraints[value["first"]] = [first];
+      then = $.extend({"other_rsc":value["first"],"before":true}, value);
+      if (value["then"] in res_ord_constraints)
+        res_ord_constraints[value["then"]].push(then);
+      else res_ord_constraints[value["then"]] = [then];
+    });
+
+    $.each(ord_set_con, function(key, set_con) {
+      $.each(set_con["sets"], function(key, set) {
+        $.each(set["resources"], function(key, resource) {
+          res_ord_set_constraints[resource] = res_ord_set_constraints[resource] || [];
+          if (res_ord_set_constraints[resource].indexOf(set_con) != -1) {
+            return;
+          }
+          res_ord_set_constraints[resource].push(set_con);
+        })
+      })
+    });
+
+    $.each(col_con, function (key, value) {
+      if (value["score"] == "INFINITY")
+        value["together"] = "Together";
+      else if (value["score"] == "-INFINITY" || value["score"] < 0)
+        value["together"] = "Apart";
+      else if (value["score"] >= 0)
+        value["together"] = "Together";
+
+      first = $.extend({"other_rsc":value["with-rsc"],"first":true}, value);
+      if (value["rsc"] in res_col_constraints)
+        res_col_constraints[value["rsc"]].push(first);
+      else res_col_constraints[value["rsc"]] = [first];
+      second = $.extend({"other_rsc":value["rsc"],"first":false}, value);
+      if (value["with-rsc"] in res_col_constraints)
+        res_col_constraints[value["with-rsc"]].push(second);
+      else res_col_constraints[value["with-rsc"]] = [second];
+    });
+    return {
+      "location_constraints": res_loc_constraints,
+      "ordering_constraints": res_ord_constraints,
+      "ordering_set_constraints": res_ord_set_constraints,
+      "colocation_constraints": res_col_constraints
+    };
+  },
+
+  update_meta_attr: function(resource_id, attr, value) {
+    value = typeof value !== 'undefined' ? value.trim() : "";
+    var data = {
+      res_id: resource_id,
+      key: attr,
+      value: value
+    };
+
+    $.ajax({
+      type: 'POST',
+      url: get_cluster_remote_url() + 'add_meta_attr_remote',
+      data: data,
+      timeout: pcs_timeout,
+      error: function (xhr, status, error) {
+        alert(
+          "Unable to update meta attribute '" + attr + "' "
+          + ajax_simple_error(xhr, status, error)
+        );
+      },
+      complete: function() {
+        Pcs.update();
       }
+    });
+  },
+
+  enable_resource: function(resource_id) {
+    $.ajax({
+      type: 'POST',
+      url: get_cluster_remote_url() + 'resource_start',
+      data: {resource: resource_id},
+      timeout: pcs_timeout,
+      success: function(data) {
+        if (data['error']) {
+          alert("Unable to enable resource '" + resource_id + "': (" + data['stderr'] + ")");
+        }
+      },
+      error: function(xhr, status, error) {
+        alert(
+          "Unable to enable resource '" + resource_id + "' "
+          + ajax_simple_error(xhr, status, error)
+        );
+      },
+      complete: function() {
+        Pcs.update();
+      }
+    });
+  },
+
+  disable_resource: function(resource_id) {
+    $.ajax({
+      type: 'POST',
+      url: get_cluster_remote_url() + 'resource_stop',
+      data: {resource: resource_id},
+      timeout: pcs_timeout,
+      success: function(data) {
+        if (data['error']) {
+          alert("Unable to disable resource '" + resource_id + "': (" + data['stderr'] + ")");
+        }
+      },
+      error: function(xhr, status, error) {
+        alert(
+          "Unable to disable resource '" + resource_id + "' "
+          + ajax_simple_error(xhr, status, error)
+        );
+      },
+      complete: function() {
+        Pcs.update();
+      }
+    });
+  },
+
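+  // Remove resource objects from the named list ('resource_list' or
+  // 'fence_list'); delete_unused_resources drops entries missing from used_map.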
+  delete_resources: function(type, resource_list) {
+    var self = this;
+    var list = self.get(type);
+    $.each(resource_list, function(i, resource) {
+      list.removeObject(resource);
+    });
+  },
 
+  delete_unused_resources: function(type, used_map) {
+    var self = this;
+    var to_delete = [];
+    var list = self.get(type);
+    $.each(list, function(i, resource) {
+      if (!(resource.get('id') in used_map)) {
+        to_delete.push(resource);
+      }
+    });
+    self.delete_resources(type, to_delete);
+  },
+
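+  // Rebuild the resource tree from fresh status data: existing top-level
+  // objects are updated in place, new ones are created by class_type, stale
+  // ones are removed, and constraints are re-attached to every resource.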
+  update: function(data) {
+    var self = this;
+    self.set('group_list', data['groups']);
+    self.set("data_version", data['status_version']);
+    var resources = data["resource_list"];
+    var resource_obj = null;
+    var resource_id;
+    var new_resource_map = {};
+    var top_resource_map = {};
+    $.each(resources, function(index, resource) {
+      var update = false;
+      resource_id = resource.id;
+      if (resource_id in self.get('top_level_resource_map')) {
+        resource_obj = self.get('top_level_resource_map')[resource_id];
+        resource_obj.update(resource_obj, resource);
+        update = true;
+      } else {
+        switch (resource["class_type"]) {
+          case "primitive":
+            resource_obj = Pcs.PrimitiveObj.create(resource);
+            break;
+          case "group":
+            resource_obj = Pcs.GroupObj.create(resource);
+            break;
+          case "clone":
+            resource_obj = Pcs.CloneObj.create(resource);
+            break;
+          case "master":
+            resource_obj = Pcs.MasterSlaveObj.create(resource);
+            break;
+        }
+      }
+
+      top_resource_map[resource_obj.get('id')] = resource_obj;
+      $.each(self.get_family_list(resource_obj), function(index, resource) {
+        new_resource_map[resource.get('id')] = resource;
+      });
+
+      if (!update) {
+        if (resource_obj.stonith) {
+          self.get('fence_list').pushObject(resource_obj);
+        } else {
+          self.get('resource_list').pushObject(resource_obj);
+        }
+      }
+    });
+
+    self.set('top_level_resource_map', top_resource_map);
+    self.set('resource_map', new_resource_map);
+
+    self.delete_unused_resources("fence_list", top_resource_map);
+    self.delete_unused_resources("resource_list", top_resource_map);
+
+    var constraints = self.get_constraints(data["constraints"]);
+    self.set('constraints', constraints);
+    var resource_map = self.get('resource_map');
+    update_resource_form_groups($("#new_resource_agent"), self.get('group_list').sort());
+    $.each(constraints, function(const_type, cons) {
+      $.each(resource_map, function(resource_id, resource_obj) {
+        if (resource_id in cons) {
+          resource_obj.set(const_type, cons[resource_id]);
+        } else {
+          resource_obj.set(const_type, []);
+        }
+      });
+    });
+    $.each(resource_map, function(resource_id, resource_obj) {
+      resource_obj.set('group_list', self.get('group_list'));
     });
+    self.set('resource_list', Ember.copy(self.get('resource_list')).sort(function(a,b){return a.get('id').localeCompare(b.get('id'))}));
+    self.set('fence_list', Ember.copy(self.get('fence_list')).sort(function(a,b){return a.get('id').localeCompare(b.get('id'))}));
   }
 });
 
+Pcs.resourcesContainer.reopen({
+  is_version_1: function() {
+    return (this.get("data_version") == '1');
+  }.property('data_version')
+});
+
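+// Common base class for all resource objects (primitives, groups, clones,
+// master/slave); provides the status, tooltip and constraint properties
+// shared by the GUI.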
+Pcs.ResourceObj = Ember.Object.extend({
+  id: null,
+  _id: Ember.computed.alias('id'),
+  name: Ember.computed.alias('id'),
+  parent: null,
+  meta_attr: [],
+  meta_attributes: Ember.computed.alias('meta_attr'),
+  disabled: false,
+  error_list: [],
+  warning_list: [],
+  group_list: [],
+  get_group_id: function() {
+    var self = this;
+    var p = self.get('parent');
+    if (p && p.get('class_type') == 'group') {
+      return p.get('id');
+    }
+    return null;
+  }.property('parent'),
+  group_selector: function() {
+    var self = this;
+    var cur_group = self.get('get_group_id');
+    var html = '<select>\n<option value="">None</option>\n';
+    $.each(self.get('group_list'), function(_, group) {
+      html += '<option value="' + group + '"';
+      if (cur_group === group) {
+        html += 'selected';
+      }
+      html += '>' + group + '</option>\n';
+    });
+    html += '</select><input type="button" value="Change group" onclick="resource_change_group(curResource(), $(this).prev().prop(\'value\'));">';
+    return html;
+  }.property('group_list', 'get_group_id'),
+  status: "unknown",
+  class_type: null, // property to determine type of the resource
+  resource_type: function() { // this property is just for displaying resource type in GUI
+    var t = this.get("class_type");
+    return t[0].toUpperCase() + t.slice(1);
+  }.property("class_type"),
+  res_type: Ember.computed.alias('resource_type'),
+  status_icon: function() {
+    var icon_class = get_status_icon_class(this.get("status_val"));
+    return "<div style=\"float:left;margin-right:6px;height:16px;\" class=\"" + icon_class + " sprites\"></div>";
+  }.property("status_val"),
+  status_val: function() {
+    var status_val = get_status_value(this.get('status'));
+    if (this.get('warning_list').length && status_val != get_status_value('disabled'))
+      status_val = get_status_value("warning");
+    if (this.get('error_list').length)
+      status_val = get_status_value("error");
+    if ((get_status_value(this.get('status')) - status_val) < 0) {
+      return get_status_value(this.get('status'));
+    } else {
+      return status_val;
+    }
+  }.property('status', 'error_list.@each.message', 'warning_list.@each.message'),
+  status_color: function() {
+    return get_status_color(this.get("status_val"));
+  }.property("status_val"),
+  status_style: function() {
+    var color = get_status_color(this.get("status_val"));
+    return "color: " + color + ((color != "green")? "; font-weight: bold;" : "");
+  }.property("status_val"),
+  show_status: function() {
+    return '<span style="' + this.get('status_style') + '">' + this.get('status') + '</span>';
+  }.property("status_style", "disabled"),
+  status_class: function() {
+    var show = ((Pcs.clusterController.get("show_all_resources"))? "" : "hidden ");
+    return ((this.get("status_val") == get_status_value("ok") || this.status == "disabled") ? show + "default-hidden" : "");
+  }.property("status_val"),
+  status_class_fence: function() {
+    var show = ((Pcs.clusterController.get("show_all_fence"))? "" : "hidden ");
+    return ((this.get("status_val") == get_status_value("ok")) ? show + "default-hidden" : "");
+  }.property("status", "status_val"),
+  tooltip: function() {
+    var self = this;
+    var out = "";
+    if (self.error_list.length > 0) {
+      out += "<span style='color: red;  font-weight: bold;'>ERRORS:</span><br>\n";
+      out += get_formated_html_list(self.error_list);
+    }
+    if (self.warning_list.length > 0) {
+      out += "<span style='color: orange;  font-weight: bold;'>WARNINGS:</span><br>\n";
+      out += get_formated_html_list(self.warning_list);
+    }
+    return out;
+  }.property("error_list. at each", "warning_list. at each"),
+  span_class: function() {
+    switch (this.get("status_val")) {
+      case get_status_value("failed"):
+        return "status-error";
+      case get_status_value("warning"):
+      case get_status_value("disabled"):
+        return "status-warning";
+      default:
+        return "";
+    }
+  }.property("status_val"),
+
+  location_constraints: [],
+  ordering_constraints: [],
+  ordering_set_constraints: [],
+  colocation_constraints: [],
+
+  get_map: function() {
+    var self = this;
+    var map = {};
+    map[self.get('id')] = self;
+    return map;
+  },
+
+  get_full_warning_list: function() {
+    var self = this;
+    var warning_list = [];
+    $.each(self.get_map(), function(name, resource){
+      warning_list = warning_list.concat(resource.get('warning_list'));
+    });
+    return warning_list;
+  },
+
+  get_full_error_list: function() {
+    var self = this;
+    var error_list = [];
+    $.each(self.get_map(), function(name, resource){
+      error_list = error_list.concat(resource.get('error_list'));
+    });
+    return error_list;
+  },
+
+  update: function(self, data) {
+    $.each(data, function(k, v) {
+      self.set(k, v);
+    });
+    self.refresh();
+  }
+});
+
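+// Plain value objects holding a resource's per-node status and its recorded
+// operations as reported by pacemaker.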
+Pcs.ResourceStatusObj = Ember.Object.extend({
+  id: null,
+  resource_agent: null,
+  managed: false,
+  failed: false,
+  role: null,
+  active: false,
+  orphaned: false,
+  failure_ignored: false,
+  nodes_running_on: 0,
+  pending: null,
+  node: null
+});
+
+Pcs.ResourceOperationObj = Ember.Object.extend({
+  call_id: 0,
+  crm_debug_origin: null,
+  crm_feature_set: null,
+  exec_time: 0,
+  exit_reason: null,
+  id: null,
+  interval: 0,
+  last_rc_change: 0,
+  last_run: 0,
+  on_node: null,
+  op_digest: null,
+  operation: null,
+  operation_key: null,
+  op_force_restart: null,
+  op_restart_digest: null,
+  op_status: 0,
+  queue_time: 0,
+  rc_code: 0,
+  transition_key: null,
+  transition_magic: null
+});
+
+Pcs.PrimitiveObj = Pcs.ResourceObj.extend({
+  agentname: null,
+  provider: null,
+  type: null,
+  stonith: false,
+  instance_attr: [],
+  instance_status: [],
+  operations: [],
+  utilization: [],
+  resource_type: Ember.computed.alias('agentname'),
+  is_primitive: true,
+  nodes_running_on: function() {
+    var self = this;
+    var nodes = [];
+    var node = null;
+    $.each(self.get('instance_status'), function(index, status) {
+      node = status.get('node');
+      if (node)
+        nodes.push(node.name);
+    });
+    return nodes;
+  }.property('instance_status.@each.node'),
+  is_in_group: function() {
+    var self = this;
+    var p = self.get('parent');
+    return (p && p.get('class_type') == 'group');
+  }.property('parent'),
+  nodes_running_on_string: function() {
+    return this.get('nodes_running_on').join(', ');
+  }.property('nodes_running_on'),
+
+  refresh: function() {
+    var self = this;
+    var stat = self.get("crm_status");
+    var new_stat = [];
+    $.each(stat, function(i,v) {
+      new_stat.push(Pcs.ResourceStatusObj.create(v));
+    });
+    var ops = self.get("operations");
+    var new_ops = [];
+    $.each(ops, function(i,v) {
+      new_ops.push(Pcs.ResourceOperationObj.create(v));
+    });
+    self.set("instance_status", new_stat);
+    self.set("operations", new_ops);
+    self.set("crm_status", null);
+  },
+
+  init: function() {
+    this.refresh();
+  }
+});
+
+Pcs.GroupObj = Pcs.ResourceObj.extend({
+  members: [],
+  is_group: true,
+  children: Ember.computed.alias('members'),
+
+  init: function() {
+    this.refresh();
+  },
+
+  get_map: function() {
+    var self = this;
+    var map = self._super();
+    var members = self.get('members');
+    $.each(members, function(i, m){
+      $.extend(map, m.get_map());
+    });
+    return map;
+  },
+
+  refresh: function() {
+    var self = this;
+    var members = self.get("members");
+    var member;
+    var new_members = [];
+    $.each(members, function(i,v) {
+      member = Pcs.PrimitiveObj.create(v);
+      member.set('parent', self);
+      new_members.push(member);
+    });
+    self.set("members", new_members);
+  }
+});
+
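+// Base for clone and master/slave resources: wraps a single member (primitive
+// or group) and turns it into a proper Ember object on refresh.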
+Pcs.MultiInstanceObj = Pcs.ResourceObj.extend({
+  member: null,
+  children: function() {
+    return [this.get('member')];
+  }.property('member'),
+  unique: false,
+  managed: false,
+  failed: false,
+  failure_ignored: false,
+  is_multi_instance: true,
+
+  get_map: function() {
+    var self = this;
+    var map = self._super();
+    $.extend(map, self.get('member').get_map());
+    return map;
+  },
+
+  init: function() {
+    this.refresh();
+  },
+
+  refresh: function() {
+    var self = this;
+    var member = self.get("member");
+    var new_member = null;
+    switch (member.class_type) {
+      case "primitive":
+        new_member = Pcs.PrimitiveObj.create(member);
+        break;
+      case "group":
+        new_member = Pcs.GroupObj.create(member);
+    }
+    new_member.set('parent', self);
+    self.set("member", new_member);
+  }
+});
+
+Pcs.CloneObj = Pcs.MultiInstanceObj.extend({
+  is_clone: true
+});
+
+Pcs.MasterSlaveObj = Pcs.MultiInstanceObj.extend({
+  masters: [],
+  slaves: [],
+  resource_type: 'Master/Slave'
+});
+
 Pcs.Router.map(function() {
   this.route("Configuration", { path: "configure"});
+
   this.resource("ACLs", {path: "acls/:aclrole_id"}, function () {
     this.route("new");
   });
   this.route("ACLs", {path: "acls"});
+
   this.resource("Fence Devices", {path: "fencedevices/:stonith_id"}, function () {
     this.route('new');
   });
   this.route("Fence Devices", { path: "fencedevices"});
+
   this.resource("Resources", {path: "resources/:resource_id"}, function () {
     this.route('new');
   });
   this.route("Resources", { path: "resources"});
+
   this.resource("Nodes", {path: "nodes/:node_id"}, function () {
     this.route('new');
   });
   this.route("Nodes", { path: "nodes"});
+
 //  this.resource("Resource", {path: 'resources/:resource_id'});
   this.route("Manage", {path: "manage"});
   this.route("Wizards", {path: "wizards"});
@@ -149,10 +975,24 @@ Pcs.WizardsRoute = Ember.Route.extend({
 
 Pcs.IndexRoute = Ember.Route.extend({
   setupController: function(controller, model) {
-    if (window.location.pathname == "/manage" || window.location.pathname == "/manage/")
+    if (
+      window.location.pathname == "/manage"
+      ||
+      window.location.pathname == "/manage/"
+    ) {
       select_menu("MANAGE");
-    else
+    }
+    else if (
+      window.location.pathname == "/permissions"
+      ||
+      window.location.pathname == "/permissions/"
+    ) {
+      select_menu("PERMISSIONS");
+      Ember.run.scheduleOnce('afterRender', this, permissions_load_all);
+    }
+    else {
       select_menu("NODES");
+    }
   }
 });
 
@@ -170,9 +1010,8 @@ Pcs.FenceDevicesRoute = Ember.Route.extend({
     select_menu("FENCE DEVICES");
   },
   model: function(params) {
-    Ember.debug("Router FD: " + params.stonith_id);
-    Pcs.opening_resource = params.stonith_id;
-    return null;
+    Pcs.resourcesContainer.set('fence_id_to_load', params.stonith_id);
+    return params.stonith_id;
   }
 });
 
@@ -205,13 +1044,13 @@ Pcs.ConfigurationRoute = Ember.Route.extend({
 Pcs.ResourcesRoute = Ember.Route.extend({
   setupController: function(controller, model) {
     if (model) {
-      select_menu("RESOURCES",model.name); 
+      select_menu("RESOURCES",model.name);
     } else {
       select_menu("RESOURCES"); 
     }
   },
   model: function(params) {
-    Pcs.opening_resource = params.resource_id;
+    Pcs.resourcesContainer.set('resource_id_to_load', params.resource_id);
     return params.resource_id;
   }
 });
@@ -222,76 +1061,66 @@ Pcs.Setting = Ember.Object.extend({
   type: null
 });
 
-Pcs.Resource = Ember.Object.extend({
+Pcs.Clusternode = Ember.Object.extend({
   name: null,
-  id: function() {
-    return this.name;
-  }.property("name"),
-  ms: false,
-  clone: false,
-  full_name: function() {
-    if (this.ms)
-      return this.name + " (M/S)";
-    if (this.clone)
-      return this.name + " (Clone)";
-    return this.name;
-  }.property("name","ms","clone"),
-  cur_resource: false,
-  checked: false,
-  nodes_running: [],
-  up: function() {
-    return this.active;
-  }.property("active"),
-  resource_name_style: function() {
-    if (this.active && !this.failed) {
-      return "";
+  id: Ember.computed.alias("name"),
+  status: null,
+  status_unknown: function() {
+    return this.get('status') == "unknown";
+  }.property("status"),
+  status_val: function() {
+    var status_val = get_status_value(this.get('status'));
+    if (this.get('warning_list').length)
+      status_val = get_status_value("warning");
+    if (this.get('error_list').length)
+      status_val = get_status_value("error");
+    if ((get_status_value(this.get('status')) - status_val) < 0) {
+      return get_status_value(this.get('status'));
     } else {
-      return "color:red";
+      return status_val;
     }
-  }.property("active", "failed"),
-
-  trclass: function(){
-    if (this.cur_resource == true)
-      return "node_selected";
-    else
-      return ""
-  }.property("cur_resource"),
-  onmouseover: function(){
-    if (this.cur_resource == true)
-      return ""
-    else
-      return "hover_over(this);"
-  }.property("cur_resource"),
-  onmouseout: function(){
-    if (this.cur_resource == true)
-      return ""
-    else
-      return "hover_out(this);"
-  }.property("cur_resource"),
-  res_class: function() {
-    cpt = this.agentname.split(":");
-    return cpt[0];
-  }.property("agentname"),
-  res_provider: function() {
-    cpt = this.agentname.split(":");
-    return cpt[2];
-  }.property("agentname"),
-  res_type: function() {
-    cpt = this.agentname.split(":");
-    if (this.stonith) 
-      return cpt[1];
-    return cpt[3];
-  }.property("agentname"),
-  showArrow: function(){
-    if (this.cur_resource != true)
-      return "display:none;"
-    else
-      return ""
-  }.property("cur_resource")
-});
-
-Pcs.Clusternode = Ember.Object.extend({
-  name: null,
+  }.property('status', 'error_list.@each.message', 'warning_list.@each.message'),
+  status_style: function() {
+    var color = get_status_color(this.get("status_val"));
+    return "color: " + color + ((color != "green")? "; font-weight: bold;" : "");
+  }.property("status_val"),
+  status_class: function() {
+    var show = ((Pcs.clusterController.get("show_all_nodes"))? "" : "hidden ");
+    return (
+      (this.get("status_val") == get_status_value("ok") || this.status == "standby" ||
+      this.status == "maintenance")
+        ? show + "default-hidden" : ""
+    );
+  }.property("status_val"),
+  status_icon: function() {
+    var icon_class = get_status_icon_class(this.get("status_val"));
+    return "<div style=\"float:left;margin-right:6px;\" class=\"" + icon_class + " sprites\"></div>";
+  }.property("status_val"),
+  error_list: [],
+  warning_list: [],
+  tooltip: function() {
+    var self = this;
+    var out = "";
+    if (self.error_list && self.error_list.length > 0) {
+      out += "<span style='color: red;  font-weight: bold;'>ERRORS:</span><br>\n";
+      out += get_formated_html_list(self.error_list);
+    }
+    if (self.warning_list && self.warning_list.length > 0) {
+      out += "<span style='color: orange;  font-weight: bold;'>WARNINGS:</span><br>\n";
+      out += get_formated_html_list(self.warning_list);
+    }
+    return out;
+  }.property("error_list", "warning_list"),
+  quorum: null,
+  quorum_show: function() {
+    if (this.status == "unknown" || this.status == "offline" || this.get('quorum') === null) {
+      return '<span style="color: orange; font-weight: bold;">unknown</span>';
+    } else if (this.quorum) {
+      return '<span style="color: green;">YES</span>';
+    } else {
+      return '<span style="color: red; font-weight: bold;">NO</span>';
+    }
+  }.property("status", "quorum"),
   cur_node: false,
   checked: false,
   resources_running: [],
@@ -320,15 +1149,20 @@ Pcs.Clusternode = Ember.Object.extend({
       return ""
   }.property("cur_node"),
   node_name_style: function() {
-    if (this.up) {
+    if (this.up && !this.get('pacemaker_maintenance')) {
       return "";
     } else {
-      if (this.pacemaker_standby)
-      	return "color: #ff6600";
+      if (this.get("pacemaker_standby") || this.get("pacemaker_maintenance"))
+        return "color: #ff6600";
       else
-	return "color:red";
+        return "color:red";
     }
-  }.property("up","pacemaker_standby"),
+  }.property("up","pacemaker_standby","pacemaker_maintenance"),
+  pacemaker_standby: null,
+  pacemaker_maintenance: Ember.computed.alias('is_in_maintenance'),
+  corosync_enabled: null,
+  pacemaker_enabled: null,
+  pcsd_enabled: null,
   standby_style: function () {
     if (this.pacemaker_standby)
       return "display: none;";
@@ -359,28 +1193,331 @@ Pcs.Clusternode = Ember.Object.extend({
     else
       return "Disabled";
   }.property("pcsd_enabled"),
-  location_constraints: null
+  location_constraints: null,
+  node_attrs: [],
+  utilization: [],
+  is_in_maintenance: function() {
+    var self = this;
+    var result = false;
+    $.each(self.get('node_attrs'), function(_, attr) {
+      if (attr["name"] == "maintenance") {
+        result = is_cib_true(attr["value"]);
+        return false; // break foreach loop
+      }
+    });
+    return result;
+  }.property('node_attrs'),
+  fence_levels: [],
+  pcsd: null,
+  corosync_daemon: null,
+  pacemaker_daemon: null,
+});
+
+Pcs.Aclrole = Ember.Object.extend({
+  name: null,
+  cur_role: false,
+  checked: false,
+  description: "",
+  user_list: null,
+  group_list: null,
+  trclass: function() {
+    return this.cur_role ? "node_selected" : "";
+  }.property("cur_role"),
+  onmouseover: function() {
+    return this.cur_role ? "" : "hover_over(this);"
+  }.property("cur_role"),
+  onmouseout: function() {
+    return this.cur_role ? "" : "hover_out(this);"
+  }.property("cur_role"),
+  showArrow: function(){
+    return this.cur_role ? "" : "display:none";
+  }.property("cur_role"),
 });
 
-Pcs.Aclrole = Ember.Object.extend({
-  name: null,
-  cur_role: false,
-  checked: false,
-  description: "",
-  user_list: null,
-  group_list: null,
-  trclass: function() {
-    return this.cur_role ? "node_selected" : "";
-  }.property("cur_role"),
-  onmouseover: function() {
-    return this.cur_role ? "" : "hover_over(this);"
-  }.property("cur_role"),
-  onmouseout: function() {
-    return this.cur_role ? "" : "hover_out(this);"
-  }.property("cur_role"),
-  showArrow: function(){
-    return this.cur_role ? "" : "display:none";
-  }.property("cur_role"),
+Pcs.Cluster = Ember.Object.extend({
+  name: null,
+  url_link: function(){return get_cluster_remote_url(this.name) + "main";}.property("name"),
+  input_name: function(){return "clusterid-" + this.name;}.property("name"),
+  div_id: function(){return "cluster_info_" + this.name}.property("name"),
+  status: "unknown",
+  status_unknown: function() {
+    return this.status == "unknown";
+  }.property("status"),
+  forbidden: function() {
+    var out = false;
+    $.each(this.get("error_list"), function(key, value) {
+      if ("forbidden" == value["type"]) {
+        out = true;
+      }
+    });
+    return out;
+  }.property("error_list"),
+  status_icon: function() {
+    var icon_class = get_status_icon_class(get_status_value(this.get('status')));
+    return "<div style=\"float:left;margin-right:6px;\" class=\"" + icon_class + " sprites\"></div>";
+  }.property("status"),
+  quorum_show: function() {
+    if (this.get('status') == "unknown") {
+      return "<span style='color:orange'>(quorate unknown)</span>"
+    } else if (!this.get('quorate')) {
+      return "<span style='color: red'>(doesn't have quorum)</span>"
+    } else {
+      return ""
+    }
+  }.property("status", "quorate"),
+  nodes: [],
+  nodes_failed: 0,
+  resource_list: [],
+  resources_failed: 0,
+  fence_list: [],
+  fence_failed: 0,
+  error_list: [],
+  warning_list: [],
+  need_reauth: false,
+  quorate: false,
+
+  get_num_of_failed: function(type) {
+    var num = 0;
+    $.each(this.get(type), function(key, value) {
+      if (value.get("status_val") < get_status_value("ok") &&
+        value.status != "disabled" && value.status != "standby" &&
+        value.status != "maintenance"
+      ) {
+        num++;
+      }
+    });
+    return num;
+  },
+
+  status_sort: function(a,b) {
+    if (a.get("status_val") == b.get("status_val"))
+      return ((a.status == b.status) ? a.get('name').localeCompare(b.get('name')) : ((a.status > b.status) ? 1 : -1));
+    return status_comparator(a.status, b.status)
+  },
+
+  add_resources: function(data) {
+    var self = this;
+    var resources = [];
+    var fence = [];
+    var resource_obj;
+    $.each(data, function (index, resource) {
+      switch (resource["class_type"]) {
+        case "primitive":
+          resource_obj = Pcs.PrimitiveObj.create(resource);
+          break;
+        case "group":
+          resource_obj = Pcs.GroupObj.create(resource);
+          break;
+        case "clone":
+          resource_obj = Pcs.CloneObj.create(resource);
+          break;
+        case "master":
+          resource_obj = Pcs.MasterSlaveObj.create(resource);
+          break;
+      }
+
+      var url_link = get_cluster_remote_url(self.get('name')) + "main#/" +
+        (resource_obj.get('stonith') ? "fencedevices/" : "resources/") +
+        resource_obj.get('id');
+      resource_obj.set('url_link', url_link);
+
+      resource_obj.set('warning_list', resource_obj.get_full_warning_list());
+      resource_obj.set('error_list', resource_obj.get_full_error_list());
+
+      if (resource_obj.stonith) {
+        fence.pushObject(resource_obj);
+      } else {
+        resources.pushObject(resource_obj);
+      }
+    });
+    resources.sort(self.status_sort);
+    fence.sort(self.status_sort);
+    self.set('fence_list', fence);
+    self.set('resource_list', resources);
+  },
+
+  add_nodes: function(data, node_attrs) {
+    var self = this;
+    self.set("need_reauth", false);
+    var nodes = [];
+    var node;
+    $.each(data, function(key, val) {
+      if (val["warning_list"]) {
+        $.each(val["warning_list"], function (key, value) {
+          if (self.get('need_reauth'))
+            return false;
+          if (typeof(value.type) !== 'undefined' && value.type == "nodes_not_authorized") {
+            self.set("need_reauth", true);
+          }
+        });
+      }
+
+      var attrs = [];
+      if (node_attrs && val["name"] in node_attrs) {
+        attrs = node_attrs[val["name"]];
+      }
+
+      node = Pcs.Clusternode.create({
+        name: val["name"],
+        url_link: get_cluster_remote_url(self.name) + "main#/nodes/" + val["name"],
+        status: val["status"],
+        quorum: val["quorum"],
+        error_list: val["error_list"],
+        warning_list: val["warning_list"]
+      });
+      node.set("node_attrs", attrs);
+      if (node.get("is_in_maintenance") && node.get('status_val') > get_status_value("maintenance")) {
+        node.set("status", "maintenance");
+      }
+      nodes.push(node);
+    });
+    nodes.sort(self.status_sort);
+    self.set("nodes", nodes);
+  }
+});
+
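+// Controller behind the dashboard: keeps a sorted list of Pcs.Cluster objects,
+// tracks the currently selected cluster and aggregates per-status counts.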
+Pcs.clusterController = Ember.Object.create({
+  cluster_list: Ember.ArrayController.create({
+    content: Ember.A(),
+    sortProperties: ['name'],
+    sortAscending: true
+  }),
+  cur_cluster: null,
+  show_all_nodes: false,
+  show_all_resources: false,
+  show_all_fence: false,
+  num_ok: 0,
+  num_error: 0,
+  num_warning: 0,
+  num_unknown: 0,
+
+  update_cur_cluster: function(cluster_name) {
+    var self = this;
+    $("#clusters_list div.arrow").hide();
+    var selected_cluster = null;
+
+    $.each(self.get('cluster_list').get('content'), function(key, cluster) {
+      if (cluster.get("name") == cluster_name) {
+        selected_cluster = cluster;
+        return false;
+      }
+    });
+
+    self.set('cur_cluster', selected_cluster);
+    if (selected_cluster) {
+      Ember.run.next(function() {
+        $("#clusters_list tr[nodeID=" + cluster_name + "] div.arrow").show();
+        correct_visibility_dashboard(self.get('cur_cluster'));
+      });
+    }
+  },
+
+  update: function(data) {
+    var self = this;
+    var clusters = data["cluster_list"];
+    var cluster_name_list = [];
+    self.set("num_ok", 0);
+    self.set("num_error", 0);
+    self.set("num_warning", 0);
+    self.set("num_unknown", 0);
+
+    $.each(clusters, function(key, value) {
+      cluster_name_list.push(value["cluster_name"]);
+      var found = false;
+      var cluster = null;
+
+      $.each(self.get('cluster_list').get('content'), function(key, pre_existing_cluster) {
+        if (pre_existing_cluster && pre_existing_cluster.get('name') == value["cluster_name"]) {
+          found = true;
+          cluster = pre_existing_cluster;
+          cluster.set("status", value["status"]);
+          cluster.set("quorate",value["quorate"]);
+          cluster.set("error_list",value["error_list"]);
+          cluster.set("warning_list",value["warning_list"]);
+        }
+      });
+
+      if (!found) {
+        cluster = Pcs.Cluster.create({
+          name: value["cluster_name"],
+          status: value["status"],
+          quorate: value["quorate"],
+          error_list: value["error_list"],
+          warning_list: value["warning_list"]
+        });
+      }
+
+      cluster.add_nodes(value["node_list"], value["node_attr"]);
+      cluster.add_resources(value["resource_list"]);
+      cluster.set("nodes_failed", cluster.get_num_of_failed("nodes"));
+      cluster.set("resources_failed", cluster.get_num_of_failed("resource_list"));
+      cluster.set("fence_failed", cluster.get_num_of_failed("fence_list"));
+
+      if (cluster.get('status') == "ok") {
+        $.each(cluster.get('fence_list').concat(cluster.get('resource_list')), function(index, res) {
+          if (res.get('warning_list').length > 0) {
+            cluster.set("status", "warning");
+            return false;
+          }
+        });
+      }
+
+      var nodes_to_auth = [];
+      $.each(cluster.get('warning_list'), function(key, val){
+        if (val.hasOwnProperty("type") && val.type == "nodes_not_authorized"){
+          nodes_to_auth = nodes_to_auth.concat(val['node_list']);
+        }
+      });
+      nodes_to_auth = $.unique(nodes_to_auth);
+
+      if (cluster.get('need_reauth') || nodes_to_auth.length > 0) {
+        cluster.get('warning_list').pushObject({
+          message: "There are few authentication problems. To fix them, click <a href='#' onclick='auth_nodes_dialog(" + JSON.stringify(nodes_to_auth) + ", null, function() {fix_auth_of_cluster();})'>here</a>.",
+          type: "nodes_not_authorized",
+          node_list: nodes_to_auth
+        });
+      }
+
+      if (!found) {
+        self.get('cluster_list').pushObject(cluster);
+      }
+
+      if (cluster.get_num_of_failed("nodes") == cluster.nodes.length) {
+        if (cluster.get('status') != "unknown")
+          cluster.get('warning_list').pushObject({
+            message: "Cluster is offline"
+          });
+
+        cluster.set("status", "unknown");
+      }
+
+      switch (get_status_value(cluster.get('status'))) {
+        case get_status_value("ok"):
+          self.incrementProperty('num_ok');
+          break;
+        case get_status_value("error"):
+          self.incrementProperty('num_error');
+          break;
+        case get_status_value("warning"):
+          self.incrementProperty('num_warning');
+          break;
+        default:
+          self.incrementProperty('num_unknown');
+          break;
+      }
+    });
+
+    var to_remove = [];
+    $.each(self.get('cluster_list').get('content'), function(key,val) {
+      if (cluster_name_list.indexOf(val.get('name')) == -1) {
+        to_remove.pushObject(val);
+      }
+    });
+
+    $.each(to_remove, function(index, val) {
+      self.get('cluster_list').removeObject(val);
+    });
+  }
 });
 
 Pcs.aclsController = Ember.ArrayController.createWithMixins({
@@ -413,25 +1550,23 @@ Pcs.aclsController = Ember.ArrayController.createWithMixins({
     var my_groups = {}, my_users = {}, my_roles = {};
     var cur_role_holder = "";
     var cur_role_name = "";
-    $.each(data, function(key, value) {
-      if (value["acls"]) {
-        if (value["acls"]["group"]) {
-          $.each(value["acls"]["group"], function (k2,v2) {
-            my_groups[k2] = v2;
-          });
-        }
-        if (value["acls"]["user"]) {
-          $.each(value["acls"]["user"], function (k2,v2) {
-            my_users[k2] = v2;
-          });
-        }
-        if (value["acls"]["role"]) {
-          $.each(value["acls"]["role"], function (k2,v2) {
-            my_roles[k2] = v2;
-          });
-        }
+    if (data["acls"]) {
+      if (data["acls"]["group"]) {
+        $.each(data["acls"]["group"], function (k2,v2) {
+          my_groups[k2] = v2;
+        });
       }
-    });
+      if (data["acls"]["user"]) {
+        $.each(data["acls"]["user"], function (k2,v2) {
+          my_users[k2] = v2;
+        });
+      }
+      if (data["acls"]["role"]) {
+        $.each(data["acls"]["role"], function (k2,v2) {
+          my_roles[k2] = v2;
+        });
+      }
+    }
     self.set('roles',my_roles);
     self.set('users',my_users);
     self.set('groups',my_groups);
@@ -528,599 +1663,14 @@ Pcs.settingsController = Ember.ArrayController.create({
     var self = this;
     var settings = {};
     self.set('content',[]);
-    $.each(data, function(key, value) {
-      if (value["cluster_settings"]) {
-	$.each(value["cluster_settings"], function(k2, v2) {
-	  var setting = Pcs.Setting.create({
-	    name: k2,
-	    value: v2
-	  });
-	  self.pushObject(setting);
-	});
-      }
-    });
-  }
-});
-
-Pcs.resourcesController = Ember.ArrayController.createWithMixins({
-  content: [],
-  parentIDMapping: {},
-  sortProperties: ['name'],
-  sortAscending: true,
-  no_resources: function () {
-    if (this.content.length == 0)
-      return true;
-    else
-      return false;
-  }.property("@each.content"),
-  cur_resource: null,
-  cur_resource_res: null,
-  cur_resource_ston: null,
-  cur_resource_info_style: function () {
-    if (typeof this.cur_resource != 'undefined' && this.cur_resource != null)
-      return "";
-    else
-      return "opacity:0";
-  }.property("cur_resource"),
-  stonith_resource_list: function () {
-    var list = [];
-    this.content.map(function (item) {
-      if (item.stonith)
-        list.push(item.name);
-    });
-    return list;
-  }.property("@each.content"),
-  init: function(){
-    this._super();
-  },
-
-  update_cur_resource: function() {
-    if (this.get("cur_resource")) { 
-      cr = this.get("cur_resource").name;
-      $.each(this.content, function(key, value) {
-	if (value.name == cr)
-	  value.set("cur_resource", true);
-	else
-	  value.set("cur_resource", false);
-      });
-    }
-    this.auto_show_hide_constraints();
-  },
-    
-  load_resource: function(resource_row, dont_update_hash) {
-    if (resource_row.length == 0)
-      return;
-    load_agent_form(resource_row, false);
-    if (!dont_update_hash)
-      window.location.hash = "/resources/" + $(resource_row).attr("nodeID");
-
-    if (Pcs.cur_page == "resources")
-      load_row(resource_row, this, 'cur_resource', "#resource_info_div", 'cur_resource_res', false);
-    else
-      load_row(resource_row, this, 'cur_resource', "#resource_info_div", 'cur_resource_res', true);
-  },
-
-  load_stonith: function(resource_row, dont_update_hash) {
-    if (resource_row.length == 0)
-      return;
-
-    load_agent_form(resource_row, true);
-    if (!dont_update_hash)
-      window.location.hash = "/fencedevices/" + $(resource_row).attr("nodeID");
-
-    if (Pcs.cur_page == "stonith")
-      load_row(resource_row, this, 'cur_resource', "#stonith_info_div", 'cur_resource_ston', false);
-    else
-      load_row(resource_row, this, 'cur_resource', "#stonith_info_div", 'cur_resource_ston', true);
-  },
-
-  auto_show_hide_constraints: function() {
-    var cont = ["location_constraints", "ordering_constraints", "ordering_set_constraints", "colocation_constraints", "meta_attr"];
-    cont.forEach(function(name) {
-    var elem = $("#" + name)[0];
-    var resource = Pcs.resourcesController.get("cur_resource_res");
-      if (elem && resource) {
-        var visible = $(elem).children("span")[0].style.display != 'none';
-        if (visible && (!resource.get(name) || resource[name].length == 0))
-          show_hide_constraints(elem);
-        else if (!visible && resource.get(name) && resource[name].length > 0)
-          show_hide_constraints(elem);
-      }
-    });
-  },
-
-  add_meta_attr: function(res_id, mkey, mvalue) {
-    $.each(this.content, function(key, value) {
-      if (value.name == res_id) {
-	var meta_attrs = [];
-	if (value.meta_attr) {
-	  meta_attrs = value.meta_attr;
-	}
-
-	var found = false;
-	$.each(meta_attrs, function (index,attr) {
-	  if (attr.key == mkey) {
-	    attr.value = mvalue;
-	    found = true;
-	  }
-	});
-
-	if (!found) {
-	  meta_attrs.pushObject({key: mkey, value: mvalue})
-	}
-	value.set("meta_attr", meta_attrs);
-      }
-    });
-  },
-
-  add_loc_constraint: function(res_id, constraint_id, node_id, score) {
-    new_loc_constraint = {}
-    new_loc_constraint["id"] = constraint_id;
-    new_loc_constraint["rsc"] = res_id;
-    new_loc_constraint["node"] = node_id;
-    new_loc_constraint["score"] = score;
-    new_loc_constraint["temp"] = true;
-
-    $.each(this.content, function(key, value) {
-      if (value.name == res_id) {
-	if (value.get("location_constraints")) {
-	  var res_loc_constraints = {};
-	  $.each(value.get("location_constraints"), function (key, value) {
-	    if (res_id in res_loc_constraints)
-	      res_loc_constraints[res_id].push(value);
-	    else res_loc_constraints[res_id] = [value];
-	  });
-	  res_loc_constraints[res_id].push(new_loc_constraint);
-	  value.set("location_constraints", res_loc_constraints[res_id]);
-	} else {
-	  value.set("location_constraints", [new_loc_constraint]);
-	}
-      }
-    });
-  },
-
-  add_ord_constraint: function(res_id, constraint_id, target_res_id, res_action, target_action, order, score) {
-    new_ord_constraint = {}
-    new_ord_constraint["id"] = constraint_id;
-    new_ord_constraint["res_id"] = res_id;
-    new_ord_constraint["order"] = order;
-    new_ord_constraint["score"] = score;
-    new_ord_constraint["other_rsc"] = target_res_id;
-    new_ord_constraint["temp"] = true;
-    if (order == "before") {
-      new_ord_constraint["before"] = true;
-      new_ord_constraint["first"] = target_res_id;
-      new_ord_constraint["then"] = res_id;
-      new_ord_constraint["first-action"] = target_action;
-      new_ord_constraint["then-action"] = res_action;
-    }
-    else {
-      new_ord_constraint["first"] = res_id;
-      new_ord_constraint["then"] = target_res_id;
-      new_ord_constraint["first-action"] = res_action;
-      new_ord_constraint["then-action"] = target_action;
-    }
-
-    $.each(this.content, function(key, value) {
-      if (value.name == res_id) {
-	if (value.get("ordering_constraints")) {
-	  var res_ord_constraints = {};
-	  $.each(value.get("ordering_constraints"), function (key, value) {
-	    if (res_id in res_ord_constraints)
-	      res_ord_constraints[res_id].push(value);
-	    else res_ord_constraints[res_id] = [value];
-	  });
-	  if (res_id in res_ord_constraints) {
-	    res_ord_constraints[res_id].push(new_ord_constraint);
-	  }
-	  else {
-	    res_ord_constraints[res_id] = [new_ord_constraint];
-	  }
-	  value.set("ordering_constraints", res_ord_constraints[res_id]);
-	} else {
-	  value.set("ordering_constraints", [new_ord_constraint]);
-	}
-      }
-    });
-  },
-
-  add_ord_set_constraint: function(res_id_list, constraint_id, set_id) {
-    var new_constraint = {};
-    new_constraint['id'] = constraint_id;
-    new_constraint['sets'] = [{
-      'id': set_id,
-      'resources': res_id_list,
-    }];
-
-    $.each(this.content, function(key, value) {
-      if (res_id_list.indexOf(value.name) != -1) {
-        if (value.get('ordering_set_constraints')) {
-          var res_id = value.name;
-          var res_ord_set_constraints = {};
-          $.each(value.get('ordering_set_constraints'), function(key, value) {
-            if (res_id in res_ord_set_constraints) {
-              res_ord_set_constraints[res_id].push(value);
-            }
-            else {
-              res_ord_set_constraints[res_id] = [value]
-            }
-          });
-          if (res_id in res_ord_set_constraints) {
-            res_ord_set_constraints[res_id].push(new_constraint);
-          }
-          else {
-            res_ord_set_constraints[res_id] = [new_constraint];
-          }
-          value.set('ordering_set_constraints', res_ord_set_constraints[res_id]);
-        }
-        else {
-          value.set('ordering_set_constraints', [new_constraint]);
-        }
-      }
-    });
-  },
-
-  add_col_constraint: function(res_id, constraint_id, target_res_id, colocation_type, score) {
-    new_col_constraint = {}
-    new_col_constraint["id"] = constraint_id;
-    new_col_constraint["res_id"] = res_id;
-    new_col_constraint["score"] = score;
-    new_col_constraint["other_rsc"] = target_res_id;
-    if (colocation_type == "apart")
-      new_col_constraint["together"] = "Apart";
-    else
-      new_col_constraint["together"] = "Together";
-
-    new_col_constraint["temp"] = true;
-
-    $.each(this.content, function(key, value) {
-      if (value.name == res_id) {
-	if (value.get("colocation_constraints") && value.get("colocation_constraints").length > 0) {
-	  var res_col_constraints = {};
-	  $.each(value.get("colocation_constraints"), function (key, value) {
-	    if (res_id in res_col_constraints)
-	      res_col_constraints[res_id].push(value);
-	    else res_col_constraints[res_id] = [value];
-	  });
-	  res_col_constraints[res_id].push(new_col_constraint);
-	  value.set("colocation_constraints", res_col_constraints[res_id]);
-	} else {
-	  value.set("colocation_constraints", [new_col_constraint]);
-	}
-      }
-    });
-  },
-  remove_constraint: function(constraint_id) {
-    $.each(this.content, function(key, value) {
-      $.each(
-        [
-          "location_constraints",
-          "ordering_constraints", "ordering_set_constraints",
-          "colocation_constraints",
-        ],
-        function(constraint_key, constraint_type) {
-          if (value[constraint_type]) {
-            value.set(
-              constraint_type,
-              $.grep(
-                value[constraint_type],
-                function(value2, key) { return value2.id != constraint_id; }
-              )
-            );
-          }
-        }
-      );
-    });
-  },
-
-
-  update: function(data) {
-    var self = this;
-    var resources = {};
-    var resource_clone_nodes = {};
-    var ord_con = {}
-    var loc_con = {}
-    var col_con = {}
-    var ord_set_con = {}
-    var res_loc_constraints = {};
-    var res_ord_constraints = {};
-    var res_ord_set_constraints = {};
-    var res_col_constraints = {};
-    var group_list = [];
-    self.parentIDMapping = {};
-    $.each(data, function(key, value) {
-      if (value["resources"]) {
-	$.each(value["resources"], function(k2, v2) {
-	  // Use resource_clone_nodes to handle resources with multiple ndoes
-	  if (!(v2["id"] in resource_clone_nodes)) {
-	    resource_clone_nodes[v2["id"]] = [];
-	  }
-
-	  if ("nodes" in v2) {
-	    $.each(v2["nodes"], function(node_num, node_name) {
-	      if ($.inArray(node_name, resource_clone_nodes[v2["id"]]) == -1) {
-		resource_clone_nodes[v2["id"]].push(node_name);
-	      }
-	    });
-	  }
-
-	  resources[v2["id"]] = v2;
-	  if ((msg_id = v2["group"]) || (msg_id = v2["clone_id"]) || (msg_id = v2["ms_id"])) {
-	    self.parentIDMapping[msg_id] = self.parentIDMapping[msg_id] || [];
-	    if (self.parentIDMapping[msg_id].indexOf(v2["id"]) == -1) {
-	      self.parentIDMapping[msg_id].push(v2["id"]);
-	    }
-	  }
-	  resources[v2["id"]]["nodes"] = resource_clone_nodes[v2["id"]].sort();
-	});
-      }
-
-      if (value["groups"]) {
-        $.each(value["groups"], function(index, group) {
-          if (group_list.indexOf(group) == -1) {
-            group_list.push(group);
-          }
+    if (data["cluster_settings"]) {
+      $.each(data["cluster_settings"], function(k2, v2) {
+        var setting = Pcs.Setting.create({
+          name: k2,
+          value: v2
         });
-      }
-
-      if (value["constraints"]) {
-	if (value["constraints"]["rsc_location"]) {
-	  $.each(value["constraints"]["rsc_location"], function (key, value) {
-	    loc_con[value["id"]] = value;
-	  });
-	}
-	if (value["constraints"]["rsc_order"]) {
-	  $.each(value["constraints"]["rsc_order"], function (key, value) {
-	    if (value["sets"]) {
-	      ord_set_con[value["id"]] = value;
-	    }
-	    else {
-	      ord_con[value["id"]] = value;
-	    }
-	  });
-	}
-	if (value["constraints"]["rsc_colocation"]) {
-	  $.each(value["constraints"]["rsc_colocation"], function (key, value) {
-	    col_con[value["id"]] = value;
-	  });
-	}
-      }
-    });
-
-    update_resource_form_groups($("#new_resource_agent"), group_list.sort());
-
-    $.each(loc_con, function (key, value) {
-      res_loc_constraints[value["rsc"]] = res_loc_constraints[value["rsc"]] || [];
-      res_loc_constraints[value["rsc"]].push(value);
-      if (self.parentIDMapping[value["rsc"]]) {
-	$.each(self.parentIDMapping[value["rsc"]], function(index,map) {
-	  res_loc_constraints[map] = res_loc_constraints[map] || [];
-	  res_loc_constraints[map].push(value);
-	});
-      }
-    });
-
-    var cur_res_holder = "";
-    var cur_res_holder_res = "";
-    var cur_res_holder_ston = "";
-    if (self.cur_resource)
-      cur_res_holder = self.cur_resource.name;
-    if (self.cur_resource_res)
-      cur_res_holder_res = self.cur_resource_res.name;
-    if (self.cur_resource_ston)
-      cur_res_holder_ston = self.cur_resource_ston.name;
-
-    self.set("cur_resource",null);
-    self.set("cur_resource_res",null);
-    self.set("cur_resource_ston",null);
-
-    resources_checked = {};
-    $.each(self.content, function (key, value) {
-      if (value.checked)
-	resources_checked[value.name] = true;
-    });
-
-
-    $.each(ord_con, function (key, value) {
-      first = $.extend({"other_rsc":value["then"],"before":false}, value);
-      if (value["first"] in res_ord_constraints)
-	res_ord_constraints[value["first"]].push(first);
-      else res_ord_constraints[value["first"]] = [first];
-      then = $.extend({"other_rsc":value["first"],"before":true}, value);
-      if (value["then"] in res_ord_constraints)
-	res_ord_constraints[value["then"]].push(then);
-      else res_ord_constraints[value["then"]] = [then];
-
-      if (self.parentIDMapping[value["first"]]) {
-	$.each(self.parentIDMapping[value["first"]], function(index,map) {
-	  res_ord_constraints[map] = res_ord_constraints[map] || [];
-	  res_ord_constraints[map].push(first);
-	});
-      }
-      if (self.parentIDMapping[value["then"]]) {
-	$.each(self.parentIDMapping[value["then"]], function(index,map) {
-	  res_ord_constraints[map] = res_ord_constraints[map] || [];
-	  res_ord_constraints[map].push(then);
-	});
-      }
-    });
-
-    $.each(ord_set_con, function(key, set_con) {
-      $.each(set_con["sets"], function(key, set) {
-        $.each(set["resources"], function(key, resource) {
-          res_ord_set_constraints[resource] = res_ord_set_constraints[resource] || [];
-          if (res_ord_set_constraints[resource].indexOf(set_con) != -1) {
-            return;
-          }
-          res_ord_set_constraints[resource].push(set_con);
-          if (self.parentIDMapping[resource]) {
-            $.each(self.parentIDMapping[resource], function(index, map) {
-              res_ord_set_constraints[map] = res_ord_set_constraints[map] || [];
-              res_ord_set_constraints[map].push(set_con);
-            });
-          }
-        })
-      })
-    });
-
-    $.each(col_con, function (key, value) {
-      if (value["score"] == "INFINITY")
-	value["together"] = "Together";
-      else if (value["score"] == "-INFINITY" || value["score"] < 0)
-	value["together"] = "Apart";
-      else if (value["score"] >= 0)
-	value["together"] = "Together";
-
-      first = $.extend({"other_rsc":value["with-rsc"],"first":true}, value);
-      if (value["rsc"] in res_col_constraints)
-	res_col_constraints[value["rsc"]].push(first);
-      else res_col_constraints[value["rsc"]] = [first];
-      second = $.extend({"other_rsc":value["rsc"],"first":false}, value);
-      if (value["with-rsc"] in res_col_constraints)
-	res_col_constraints[value["with-rsc"]].push(second);
-      else res_col_constraints[value["with-rsc"]] = [second];
-
-      if (self.parentIDMapping[value["rsc"]]) {
-	$.each(self.parentIDMapping[value["rsc"]], function(index,map) {
-	  res_col_constraints[map] = res_col_constraints[map] || [];
-	  res_col_constraints[map].push(first);
-	});
-      }
-      if (self.parentIDMapping[value["with-rsc"]]) {
-	$.each(self.parentIDMapping[value["with-rsc"]], function(index,map) {
-	  res_col_constraints[map] = res_col_constraints[map] || [];
-	  res_col_constraints[map].push(second);
-	});
-      }
-    });
-
-//    self.set('content',[]);
-    $.each(resources, function(key, value) {
-      found = false;
-      var resource = null;
-      $.each(self.content, function(key, pre_existing_resource) {
-	if (pre_existing_resource && pre_existing_resource.name == value["id"]) {
-	  found = true;
-	  resource = pre_existing_resource;
-	  resource.set("agentname", value["agentname"]);
-	  resource.set("active", value["active"]);
-	  resource.set("disabled", value["disabled"]);
-	  resource.set("nodes", value["nodes"]);
-	  resource.set("node_list", value["nodes"].join(", "));
-	  resource.set("group", value["group"]);
-	  resource.set("clone", value["clone"]);
-	  resource.set("ms", value["ms"]);
-	  resource.set("failed", value["failed"]);
-	  resource.set("orphaned", value["orphaned"]);
-	  resource.set("options", value["options"]);
-	  resource.set("location_constraints", res_loc_constraints[value["id"]]);
-	  resource.set("ordering_constraints", res_ord_constraints[value["id"]]);
-	  resource.set("ordering_set_constraints", res_ord_set_constraints[value["id"]]);
-	  resource.set("colocation_constraints", res_col_constraints[value["id"]]);
-	  resource.set("stonith", value["stonith"]);
-	  resource.set("meta_attr", value["meta_attr"]);
-	}
-      });
-      if (found == false) {
-	resource = Pcs.Resource.create({
-	  name: value["id"],
-	  agentname: value["agentname"],
-	  active: value["active"],
-	  disabled: value["disabled"],
-	  nodes: value["nodes"],
-	  node_list: value["nodes"].join(", "),
-	  group: value["group"],
-	  clone: value["clone"],
-	  ms: value["ms"],
-	  failed: value["failed"],
-	  orphaned: value["orphaned"],
-	  options: value["options"],
-	  location_constraints: res_loc_constraints[value["id"]],
-	  ordering_constraints: res_ord_constraints[value["id"]],
-	  ordering_set_constraints: res_ord_set_constraints[value["id"]],
-	  colocation_constraints: res_col_constraints[value["id"]],
-	  stonith: value["stonith"],
-	  meta_attr: value["meta_attr"]
-	});
-      }
-      var pathname = window.location.pathname.split('/');
-
-      if (cur_res_holder == "") {
-	cur_res_name = Pcs.opening_resource;
-      } else {
-	cur_res_name = cur_res_holder;
-      }
-
-      if (resource.name == cur_res_name) {
-	resource.set("cur_resource",true);
-	self.set("cur_resource", resource);
-	if (Pcs.cur_page == "resources") { self.set("cur_resource_res", resource);}
-	if (Pcs.cur_page == "stonith") { self.set("cur_resource_stonith", resource);}
-      }
-
-      if (resource.name == cur_res_holder_res) {
-	resource.set("cur_resource",true);
-	self.set("cur_resource_res", resource);
-      }
-
-      if (resource.name == cur_res_holder_ston) {
-	resource.set("cur_resource",true);
-	self.set("cur_resource_ston", resource);
-      }
-
-      if (resources_checked[resource.name])
-	resource.set('checked', true);
-
-      if (found == false)
-	self.pushObject(resource);
-    });
-    
-    var resourcesToRemove = [];
-    $.each(self.content, function(key, res) {
-      found = false;
-      $.each(resources, function(k2, res2) {
-      	if (res && res2["id"] == res.name) {
-      	  found = true;
-	}
+        self.pushObject(setting);
       });
-      if (!found && res) {
-	resourcesToRemove.push(res);
-      }
-    });
-
-    // If any resources have been renamed or removed we remove them content
-    $.each(resourcesToRemove, function(k, v) {
-      self.content.removeObject(v);
-    });
-
-    // Set defaults if not resources are set
-    if (self.content && self.content.length > 0) {
-      if (self.cur_resource_ston == null) {
-	for (var i=0; i< self.content.length; i++) {
-	  if (self.content[i].stonith) {
-	    self.set("cur_resource_ston", self.content[i]);
-	    self.content[i].set("cur_resource",true);
-	    break;
-	  }
-	}
-      }
-      if (self.cur_resource_res == null) {
-	for (var i=0; i< self.content.length; i++) {
-	  if (!self.content[i].stonith) {
-	    self.set("cur_resource_res", self.content[i]);
-	    self.content[i].set("cur_resource",true);
-	    break;
-	  }
-	}
-      }
-      if (self.cur_resource == null) {
-	if (Pcs.cur_page == "resources") {
-	  self.set("cur_resource", self.cur_resource_res);
-	}
-	if (Pcs.cur_page == "stonith") {
-	  self.set("cur_resource", self.cur_resource_ston);
-	}
-      }
     }
   }
 });
@@ -1135,19 +1685,14 @@ Pcs.selectedNodeController = Ember.Object.createWithMixins({
 
 Pcs.nodesController = Ember.ArrayController.createWithMixins({
   content: [],
+  utilization_support: false,
   cur_node: null,
   cur_node_attr: function () {
-    var ret_val = [];
     var nc = this;
-    $.each(this.content, function(node, value) {
-      if ("node_attrs" in value && nc.cur_node && value["node_attrs"]) {
-        if (nc.cur_node.name in value["node_attrs"]) {
-          ret_val = ret_val.concat(value["node_attrs"][nc.cur_node.name]);
-        }
-        return false;
-      }
-    });
-    return ret_val;
+    if (nc.get('cur_node')) {
+      return nc.get('cur_node').get('node_attrs');
+    }
+    return [];
   }.property("cur_node", "content. at each.node_attrs"),
   cur_node_fence_levels: function () {
     var ret_val = [];
@@ -1175,39 +1720,33 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({
   update: function(data){
     var self = this;
     var nodes = [];
-    corosync_nodes_online = [];
-    pacemaker_nodes_online = [];
-    pacemaker_nodes_standby = [];
-    $.each(data, function(key, value) {
-      nodes.push(key);
-      if (value["corosync_online"])
-	corosync_nodes_online = corosync_nodes_online.concat(value["corosync_online"]);
-      if (value["pacemaker_online"])
-	pacemaker_nodes_online = pacemaker_nodes_online.concat(value["pacemaker_online"]);
-      if (value["pacemaker_standby"])
-	pacemaker_nodes_standby = pacemaker_nodes_standby.concat(value["pacemaker_standby"]);
-    });
-    nodes.sort();
+    var corosync_nodes_online = data["corosync_online"];
+    var pacemaker_nodes_online = data["pacemaker_online"];
+    var pacemaker_nodes_standby = data["pacemaker_standby"];
+
     var resources_on_nodes = {};
     var lc_on_nodes = {};
-    $.each(data, function(node, node_info) {
-      resources_on_nodes[node] = [];
-      lc_on_nodes[node] = [];
-      if (node_info["resources"]) {
-	$.each(node_info["resources"], function(key, resource) {
-	  $.each(resource["nodes"], function(node_key, resource_on_node) {
-	    if (resources_on_nodes[resource_on_node])
-	      resources_on_nodes[resource_on_node].push(resource["id"]);
-	    else
-	      resources_on_nodes[resource_on_node] = [resource["id"]];
-	  });
-	});
-      }
-      if (node_info["constraints"] && node_info["constraints"]["rsc_location"]) {
-	$.each(node_info["constraints"]["rsc_location"], function(key, constraint) {
-	  if (constraint["node"] == node)
-	    lc_on_nodes[node].push(constraint)
-	});
+    $.each(data['node_list'], function(index, node) {
+      nodes.push(node.name);
+
+      resources_on_nodes[node.name] = [];
+      $.each(Pcs.resourcesContainer.get('resource_map'), function(resource_id, resource_obj) {
+        var nodes_running_on = resource_obj.get('nodes_running_on');
+        if (nodes_running_on) {
+          $.each(nodes_running_on, function(index, node_name) {
+            if (node.name == node_name) {
+              resources_on_nodes[node.name].push(resource_id);
+            }
+          });
+        }
+      });
+
+      lc_on_nodes[node.name] = [];
+      if (data["constraints"] && data["constraints"]["rsc_location"]) {
+        $.each(data["constraints"]["rsc_location"], function(key, constraint) {
+          if (constraint["node"] == node.name)
+            lc_on_nodes[node.name].push(constraint)
+        });
       }
     });
 
@@ -1217,115 +1756,135 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({
       cur_node_holder = self.cur_node.name;
     $.each(self.content, function (key, value) {
       if (value.checked)
-	nodes_checked[value.name] = true;
+        nodes_checked[value.name] = true;
     });
 
-//    self.set('content',[]);
-    $.each(nodes, function(key, node_id) {
+    if (data["nodes_utilization"]) {
+      self.set("utilization_support", true);
+    } else {
+      self.set("utilization_support", false);
+    }
+
+    $.each(data['node_list'], function(_, node_obj) {
+      var node_id = node_obj.name;
       if ($.inArray(node_id, corosync_nodes_online) > -1) {
-	corosync_online = true;
+        corosync_online = true;
       } else {
-	corosync_online = false;
+        corosync_online = false;
       }
 
       if ($.inArray(node_id, pacemaker_nodes_online) > -1) {
-	pacemaker_online = true;
+        pacemaker_online = true;
       } else {
-	pacemaker_online = false;
+        pacemaker_online = false;
       }
 
       if ($.inArray(node_id, pacemaker_nodes_standby) > -1) {
-	pacemaker_standby = true;
+        pacemaker_standby = true;
       } else {
-	pacemaker_standby = false;
+        pacemaker_standby = false;
       }
 
-      if (data[node_id]["noresponse"] == true) {
-	pcsd_daemon = false
+      if (node_obj["status"] == 'unknown') {
+        pcsd_daemon = false
       } else {
-	pcsd_daemon = true
+        pcsd_daemon = true
       }
 
-      if (data[node_id]["notauthorized"] == "true" || data[node_id]["notoken"] == true) {
-	authorized = false;
+      if (node_obj["notauthorized"] == "true" || node_obj["notoken"] == true) {
+        authorized = false;
       } else {
-	authorized = true;
+        authorized = true;
       }
 
-      if (data[node_id]["corosync"] && data[node_id]["pacemaker"] &&
-		pacemaker_online && corosync_online) {
-	up_status = true;
+      if (node_obj["corosync"] && node_obj["pacemaker"] &&
+        pacemaker_online && corosync_online) {
+        up_status = true;
       } else {
-       up_status = false;
-      }       
+        up_status = false;
+      }
+
+      var node_attr = [];
+      if (data["node_attr"] && data["node_attr"][node_id]) {
+        node_attr = data["node_attr"][node_id];
+      }
+
+      var utilization = [];
+      if (data["nodes_utilization"] && data["nodes_utilization"][node_id]) {
+        utilization = data["nodes_utilization"][node_id];
+      }
 
       found = false;
       var node = null;
       $.each(self.content, function(key, pre_existing_node) {
-	if (pre_existing_node && pre_existing_node.name == node_id) {
-	  node = pre_existing_node;
-	  found = true;
-	  node.set("authorized",authorized);
-	  node.set("up",up_status);
-	  node.set("pcsd",pcsd_daemon && authorized);
-	  node.set("corosync_daemon", data[node_id]["corosync"]);
-	  node.set("corosync_enabled", data[node_id]["corosync_enabled"]);
-	  node.set("pacemaker_daemon", data[node_id]["pacemaker"]);
-	  node.set("pacemaker_enabled", data[node_id]["pacemaker_enabled"]);
-	  node.set("pcsd_enabled", data[node_id]["pcsd_enabled"]);
-	  node.set("corosync", corosync_online);
-	  node.set("pacemaker", pacemaker_online);
-	  node.set("pacemaker_standby", pacemaker_standby);
-	  node.set("cur_node",false);
-	  node.set("running_resources", Pcs.getResourcesFromID($.unique(resources_on_nodes[node_id].sort().reverse())));
-	  node.set("location_constraints", lc_on_nodes[node_id].sort());
-	  node.set("uptime", data[node_id]["uptime"]);
-	  node.set("node_id", data[node_id]["node_id"]);
-	  node.set("node_attrs", data[node_id]["node_attr"]);
-	  node.set("fence_levels", data[node_id]["fence_levels"]);
-	}
+        if (pre_existing_node && pre_existing_node.name == node_id) {
+          node = pre_existing_node;
+          found = true;
+          node.set("authorized",authorized);
+          node.set("up",up_status);
+          node.set("pcsd",pcsd_daemon && authorized);
+          node.set("corosync_daemon", node_obj["corosync"]);
+          node.set("corosync_enabled", node_obj["corosync_enabled"]);
+          node.set("pacemaker_daemon", node_obj["pacemaker"]);
+          node.set("pacemaker_enabled", node_obj["pacemaker_enabled"]);
+          node.set("pcsd_enabled", node_obj["pcsd_enabled"]);
+          node.set("corosync", corosync_online);
+          node.set("pacemaker", pacemaker_online);
+          node.set("pacemaker_standby", pacemaker_standby);
+          node.set("cur_node",false);
+          node.set("running_resources", Pcs.getResourcesFromID($.unique(resources_on_nodes[node_id].sort().reverse())));
+          node.set("location_constraints", lc_on_nodes[node_id].sort());
+          node.set("uptime", node_obj["uptime"]);
+          node.set("node_id", node_obj["id"]);
+          node.set("node_attrs", node_attr);
+          node.set("fence_levels", data["fence_levels"]);
+          node.set("status", node_obj["status"]);
+          node.set("utilization", utilization);
+        }
       });
 
       if (found == false) {
-	var node = Pcs.Clusternode.create({
-	  name: node_id,
-	  authorized:  authorized,
-	  up: up_status,
-	  pcsd: pcsd_daemon && authorized,
-	  corosync_daemon: data[node_id]["corosync"],
-	  corosync_enabled: data[node_id]["corosync_enabled"],
-	  pacemaker_daemon: data[node_id]["pacemaker"],
-	  pacemaker_enabled: data[node_id]["pacemaker_enabled"],
-	  pcsd_enabled: data[node_id]["pcsd_enabled"],
-	  corosync: corosync_online,
-	  pacemaker: pacemaker_online,
-	  pacemaker_standby: pacemaker_standby,
-	  cur_node: false,
-	  running_resources: Pcs.getResourcesFromID($.unique(resources_on_nodes[node_id].sort().reverse())),
-	  location_constraints: lc_on_nodes[node_id].sort(),
-	  uptime: data[node_id]["uptime"],
-	  node_id: data[node_id]["node_id"],
-	  node_attrs: data[node_id]["node_attr"],
-	  fence_levels: data[node_id]["fence_levels"]
-	});
+        var node = Pcs.Clusternode.create({
+          name: node_id,
+          authorized:  authorized,
+          up: up_status,
+          pcsd: pcsd_daemon && authorized,
+          corosync_daemon: node_obj["corosync"],
+          corosync_enabled: node_obj["corosync_enabled"],
+          pacemaker_daemon: node_obj["pacemaker"],
+          pacemaker_enabled: node_obj["pacemaker_enabled"],
+          pcsd_enabled: node_obj["pcsd_enabled"],
+          corosync: corosync_online,
+          pacemaker: pacemaker_online,
+          pacemaker_standby: pacemaker_standby,
+          cur_node: false,
+          running_resources: Pcs.getResourcesFromID($.unique(resources_on_nodes[node_id].sort().reverse())),
+          location_constraints: lc_on_nodes[node_id].sort(),
+          uptime: node_obj["uptime"],
+          node_id: node_obj["id"],
+          node_attrs: node_attr,
+          fence_levels: data["fence_levels"],
+          status: node_obj["status"],
+          utilization: utilization
+        });
       }
       var pathname = window.location.pathname.split('/');
 
       if (cur_node_holder == "") {
-	cur_node_name = Pcs.opening_node;
+        cur_node_name = Pcs.opening_node;
       } else {
-	cur_node_name = cur_node_holder;
+        cur_node_name = cur_node_holder;
       }
       if (node.name == cur_node_name) {
-	node.set("cur_node",true);
-	self.set("cur_node", node);
+        node.set("cur_node",true);
+        self.set("cur_node", node);
       }
 
       if (nodes_checked[node.name])
-	node.set("checked",true);
+        node.set("checked",true);
 
       if (found == false)
-	self.pushObject(node);
+        self.pushObject(node);
     });
     if (self.content && self.content.length > 0 && self.cur_node == null) {
       self.set("cur_node", self.content[0]);
@@ -1336,17 +1895,18 @@ Pcs.nodesController = Ember.ArrayController.createWithMixins({
     $.each(self.content, function (key, node) {
       found = false;
       $.each(nodes, function (k,v) {
-	if (v == node.name)
-	  found = true;
+        if (v == node.name)
+          found = true;
       });
       if (!found) {
-	nodesToRemove.push(node);
+        nodesToRemove.push(node);
       }
     });
 
     $.each(nodesToRemove, function(k,v) {
       self.content.removeObject(v);
     });
+    self.set('content', Ember.copy(self.get('content').sort(function(a,b){return a.get('name').localeCompare(b.get('name'))})));
   }
 });
 
@@ -1355,4 +1915,8 @@ function myUpdate() {
 //  window.setTimeout(myUpdate,4000);
 }
 
-Pcs.update(true);
+Pcs.set('updater', Pcs.Updater.create({
+  timeout: 20000,
+  update_function: Pcs._update,
+  update_target: Pcs
+}));
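
The Pcs.Updater configured above is defined elsewhere in this commit; the
options it receives (timeout, update_function, update_target) suggest a simple
polling loop replacing the old one-shot Pcs.update(true) call. A minimal
sketch of that pattern, assuming nothing about the real implementation beyond
those three options (SimpleUpdater and tick are illustrative names only):

// Illustrative sketch, not the upstream Pcs.Updater implementation.
// It shows the polling behaviour implied by the options above: invoke
// update_function with update_target as its receiver, then schedule the
// next run after `timeout` milliseconds (20000 ms in the snippet above).
function SimpleUpdater(opts) {
  this.timeout = opts.timeout;
  this.update_function = opts.update_function;
  this.update_target = opts.update_target;
}
SimpleUpdater.prototype.start = function() {
  var self = this;
  (function tick() {
    self.update_function.call(self.update_target);
    window.setTimeout(tick, self.timeout);
  })();
};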
diff --git a/pcsd/public/js/pcsd.js b/pcsd/public/js/pcsd.js
index de0a939..4412e49 100644
--- a/pcsd/public/js/pcsd.js
+++ b/pcsd/public/js/pcsd.js
@@ -1,11 +1,11 @@
 var pcs_timeout = 30000;
 
 function curResource() {
-  return Pcs.resourcesController.cur_resource.name
+  return Pcs.resourcesContainer.get('cur_resource').get('id')
 }
 
 function curStonith() {
-  return Pcs.resourcesController.cur_resource.name
+  return Pcs.resourcesContainer.get('cur_fence').get('id')
 }
 
 function configure_menu_show(item) {
@@ -36,10 +36,7 @@ function select_menu(menu, item, initial) {
   }
 
   if (menu == "RESOURCES") {
-    Pcs.set('cur_page',"resources")
-    Pcs.resourcesController.set("cur_resource",Pcs.resourcesController.cur_resource_res);
-    if (item)
-      Pcs.resourcesController.load_resource($('[nodeID="'+item+'"]'));
+    Pcs.set('cur_page',"resources");
     menu_show("resource", true);
   } else {
     menu_show("resource", false);
@@ -47,9 +44,6 @@ function select_menu(menu, item, initial) {
 
   if (menu == "FENCE DEVICES") {
     Pcs.set('cur_page',"stonith");
-    Pcs.resourcesController.set("cur_resource",Pcs.resourcesController.cur_resource_ston);
-    if (item)
-      Pcs.resourcesController.load_stonith($('[nodeID='+item+']'));
     menu_show("stonith", true);
   } else {
     menu_show("stonith", false);
@@ -62,6 +56,13 @@ function select_menu(menu, item, initial) {
     menu_show("cluster", false);
   }
 
+  if (menu == "PERMISSIONS") {
+    Pcs.set('cur_page', "permissions");
+    menu_show("cluster", true);
+  } else {
+    menu_show("cluster", false);
+  }
+
   if (menu == "CONFIGURE") {
     Pcs.set('cur_page',"configure");
     menu_show("configure", true);
@@ -96,57 +97,131 @@ function create_group() {
 
   if (num_nodes == 0) {
     alert("You must select at least one resource to add to a group");
-  } else {
-    $("#resources_to_add_to_group").val(node_names);
-    $("#add_group").dialog({title: 'Create Group',
-      modal: true, resizable: false, 
-      buttons: {
-	Cancel: function() {
-	  $(this).dialog("close");
-	},
-	"Create Group": function() {
-	  var data = $('#add_group > form').serialize();
-	  var url = get_cluster_remote_url() + "add_group";
-	  $.ajax({
-	    type: "POST",
-	    url: url,
-	    data: data,
-	    success: function() {
-	      Pcs.update();
-	      $("#add_group").dialog("close");
-	      reload_current_resource();
-	    },
-	    error: function (xhr, status, error) {
-	      alert(xhr.responseText);
-	      $("#add_group").dialog("close");
-	    }
-	  });
-	}
+    return;
+  }
+
+  $("#resources_to_add_to_group").val(node_names);
+  $("#add_group").dialog({
+    title: 'Create Group',
+    modal: true,
+    resizable: false,
+    buttons: {
+      Cancel: function() {
+        $(this).dialog("close");
+      },
+      "Create Group": function() {
+        var data = $('#add_group > form').serialize();
+        var url = get_cluster_remote_url() + "add_group";
+        $.ajax({
+          type: "POST",
+          url: url,
+          data: data,
+          success: function() {
+            Pcs.update();
+            $("#add_group").dialog("close");
+          },
+          error: function (xhr, status, error) {
+            alert(
+              "Error creating group "
+              + ajax_simple_error(xhr, status, error)
+            );
+            $("#add_group").dialog("close");
+          }
+        });
       }
-    });
+    }
+  });
+}
+
+function add_node_dialog() {
+  var buttonOpts = [
+    {
+      text: "Add Node",
+      id: "add_node_submit_btn",
+      click: function() {
+        $("#add_node_submit_btn").button("option", "disabled", true);
+        checkAddingNode();
+      }
+    },
+    {
+      text: "Cancel",
+      click: function() {
+        $(this).dialog("close");
+      }
+    }
+  ];
+
+  buttonOpts["Cancel"] = function() {
+    $(this).dialog("close");
+  };
+
+  // If you hit enter it triggers the first button: Add Node
+  $('#add_node').keypress(function(e) {
+    if (e.keyCode == $.ui.keyCode.ENTER && !$("#add_node_submit_btn").button("option", "disabled")) {
+        $("#add_node_submit_btn").trigger("click");
+      return false;
+    }
+  });
+
+  $('#add_node').dialog({
+    title: 'Add Node',
+    modal:true,
+    resizable: false,
+    width: 'auto',
+    buttons: buttonOpts
+  });
+}
+
+function checkAddingNode(){
+  var nodeName = $("#add_node").children("form").find("[name='new_nodename']").val().trim();
+  if (nodeName == "") {
+    $("#add_node_submit_btn").button("option", "disabled", false);
+    return false;
   }
+
+  $.ajax({
+    type: 'POST',
+    url: '/remote/check_gui_status',
+    data: {"nodes": nodeName},
+    timeout: pcs_timeout,
+    success: function (data) {
+      var mydata = jQuery.parseJSON(data);
+      if (mydata[nodeName] == "Unable to authenticate") {
+        auth_nodes_dialog([nodeName], function(){$("#add_node_submit_btn").trigger("click");});
+        $("#add_node_submit_btn").button("option", "disabled", false);
+      } else if (mydata[nodeName] == "Offline") {
+        alert("Unable to contact node '" + nodeName + "'");
+        $("#add_node_submit_btn").button("option", "disabled", false);
+      } else {
+        create_node($("#add_node").children("form"));
+      }
+    },
+    error: function (XMLHttpRequest, textStatus, errorThrown) {
+      alert("ERROR: Unable to contact server");
+      $("#add_node_submit_btn").button("option", "disabled", false);
+    }
+  });
 }
 
 function create_node(form) {
-  dataString = $(form).serialize();
-  var nodeName = $(form).find("[name='new_nodename']").val();
-  url = get_cluster_remote_url() + $(form).attr("action");
+  var dataString = $(form).serialize();
+  dataString += "&clustername=" + get_cluster_name();
   $.ajax({
     type: "POST",
-    url: url,
+    url: "/remote/add_node_to_cluster",
     data: dataString,
-    dataType: "json",
     success: function(returnValue) {
-      $('input.create_node').show();
-      Pcs.update();
+      $("#add_node_submit_btn").button("option", "disabled", false);
       $('#add_node').dialog('close');
+      Pcs.update();
     },
     error: function(error) {
       alert(error.responseText);
-      $('input.create_node').show();
+      $("#add_node_submit_btn").button("option", "disabled", false);
     }
   });
 }
+
 // If update is set to true we update the resource instead of create it
 // if stonith is set to true we update/create a stonith agent
 function create_resource(form, update, stonith) {
@@ -168,25 +243,33 @@ function create_resource(form, update, stonith) {
     success: function(returnValue) {
       $('input.apply_changes').show();
       if (returnValue["error"] == "true") {
-	alert(returnValue["stderr"]);
+        alert(returnValue["stderr"]);
       } else {
-	Pcs.update();
-	if (!update) {
-	  if (stonith)
-	    $('#add_stonith').dialog('close');
-	  else
-	    $('#add_resource').dialog('close');
-	} else { 
-	  reload_current_resource();
-	}
+        Pcs.update();
+        if (!update) {
+          if (stonith)
+            $('#add_stonith').dialog('close');
+          else
+            $('#add_resource').dialog('close');
+        } else {
+          reload_current_resource();
+        }
       }
     },
-    error: function() {
-      if (update)
-	alert("Unable to update " + name);
-      else
-	alert("Unable to add " + name);
-      $('#apply_changes').fadeIn();
+    error: function(xhr, status, error) {
+      if (update) {
+        alert(
+          "Unable to update " + name + " "
+          + ajax_simple_error(xhr, status, error)
+        );
+      }
+      else {
+        alert(
+          "Unable to add " + name + " "
+          + ajax_simple_error(xhr, status, error)
+        );
+      }
+      $('input.apply_changes').show();
     }
   });
 }
@@ -200,15 +283,14 @@ function disable_spaces(item) {
 }
 
 function load_resource_form(item, ra, stonith) {
-  data = { "new": true, resourcename: ra};
+  var data = { new: true, resourcename: ra};
+  var command;
   if (!stonith)
     command = "resource_metadata";
   else
     command = "fence_device_metadata";
   
-  item.load(get_cluster_remote_url() + command, data, function() {
-    disable_spaces(this);
-  });
+  item.load(get_cluster_remote_url() + command, data);
 }
 
 function update_resource_form_groups(form, group_list) {
@@ -233,104 +315,106 @@ function update_resource_form_groups(form, group_list) {
   select.replaceWith(select_new);
 }
 
-function verify_node_remove() {
-  var buttonOpts = {};
-  var ids = []
-  $.each($('.node_list_check :checked'), function (i,e) {
-    ids.push($(e).parent().parent().attr("nodeID"));
-  });
-  buttonOpts["Remove Node(s)"] = function() {
-    if (ids.length > 0) {
-      remove_nodes(ids);
-    }
+function verify_remove(remove_func, forceable, checklist_id, dialog_id, label, ok_text, title, remove_id) {
+  var remove_id_list = new Array();
+  if (remove_id) {
+    remove_id_list = [remove_id];
   }
-  buttonOpts["Cancel"] = function() {
-    $(this).dialog("close");
-  };
-
-  list_of_nodes = "<ul>";
-  $('.node_list_check :checked').each(function (i,e) {
-    list_of_nodes += "<li>" + $(e).parent().parent().attr("nodeID") + "</li>";
-  });
-  list_of_nodes += "</ul>";
-  $("#nodes_to_remove").html(list_of_nodes);
-  if (ids.length == 0) {
-    alert("You must select at least one node to remove");
+  else {
+    remove_id_list = get_checked_ids_from_nodelist(checklist_id);
+  }
+  if (remove_id_list.length < 1) {
+    alert("You must select at least one " + label + " to remove.");
     return;
   }
 
-  $("#remove_node").dialog({title: "Remove Node",
-    modal: true, resizable: false,
+  var buttonOpts = [
+    {
+      text: ok_text,
+      id: "verify_remove_submit_btn",
+      click: function() {
+        if (remove_id_list.length < 1) {
+          return;
+        }
+        $("#verify_remove_submit_btn").button("option", "disabled", true);
+        if (forceable) {
+          force = $("#" + dialog_id + " :checked").length > 0
+          remove_func(remove_id_list, force);
+        }
+        else {
+          remove_func(remove_id_list);
+        }
+      }
+    },
+    {
+      text: "Cancel",
+      id: "verify_remove_cancel_btn",
+      click: function() {
+        $(this).dialog("destroy");
+        if (forceable) {
+          $("#" + dialog_id + " input[name=force]").attr("checked", false);
+        }
+      }
+    }
+  ];
+
+  var name_list = "<ul>";
+  $.each(remove_id_list, function(key, remid) {
+    name_list += "<li>" + remid + "</li>";
+  });
+  name_list += "</ul>";
+  $("#" + dialog_id + " .name_list").html(name_list);
+  $("#" + dialog_id).dialog({
+    title: title,
+    modal: true,
+    resizable: false,
     buttons: buttonOpts
   });
 }
 
-function verify_remove(rem_type, error_message, ok_message, title_message, resource_id, post_location) {
-  if (!error_message)
-    if (rem_type == "resource")
-      error_message = "You must select at least one resource.";
-    else
-      error_message = "You must select at least one fence device.";
-  if (!ok_message)
-    ok_message = "Remove resource(s)";
-  if (!title_message)
-    title_message = "Resource Removal";
-  if (!post_location)
-    post_location = "/resourcerm";
-
-  var buttonOpts = {}
-  buttonOpts[ok_message] = function() {
-    if (resource_id) {
-      if (rem_type == "cluster")
-	remove_cluster([resource_id]);
-      else
-	remove_resource([resource_id]);
-    } else {
-      ids = []
-      $.each($('#'+rem_type+'_list .node_list_check :checked'), function (i,e) {
-	ids.push($(e).parent().parent().attr("nodeID"))
-      });
-      if (ids.length > 0) {
-	if (rem_type == "cluster")
-	  remove_cluster(ids);
-	else
-	  remove_resource(ids);
-      }
-    }
-    $(this).dialog("close");
-//    if (rem_type == "cluster")
- //     document.location.reload();
-  };
-  buttonOpts["Cancel"] = function() {
-    $(this).dialog("close");
-  };
+function verify_remove_clusters(cluster_id) {
+  verify_remove(
+    remove_cluster, false, "cluster_list", "dialog_verify_remove_clusters",
+    "cluster", "Remove Cluster(s)", "Cluster Removal", cluster_id
+  );
+}
 
-  var list_of_nodes = "<ul>";
-  var nodes_to_remove = 0;
+function verify_remove_nodes(node_id) {
+  verify_remove(
+    remove_nodes, false, "node_list", "dialog_verify_remove_nodes",
+    "node", "Remove Node(s)", "Remove Node", node_id
+  );
+}
 
-  if (resource_id) {
-    list_of_nodes += "<li>" + resource_id +"</li>";
-    nodes_to_remove++;
-  } else {
-    $("#"+rem_type+"_list :checked").each(function (index,element) {
-      if ($(element).parent().parent().attr("nodeID")) {
-	if ($(element).is(':visible')) {
-	  list_of_nodes += "<li>" + $(element).parent().parent().attr("nodeID")+"</li>";
-	  nodes_to_remove++;
-	}
-      }
-    });
-  }
-  list_of_nodes += "</ul>";
-  if (nodes_to_remove != 0) {
-    $("#resource_to_remove").html(list_of_nodes);
-    $("#verify_remove").dialog({title: title_message,
-      modal: true, resizable: false,
-      buttons: buttonOpts
-    });
-  } else {
-    alert(error_message);
-  }
+function verify_remove_resources(resource_id) {
+  verify_remove(
+    remove_resource, true, "resource_list", "dialog_verify_remove_resources",
+    "resource", "Remove resource(s)", "Resurce Removal", resource_id
+  );
+}
+
+function verify_remove_fence_devices(resource_id) {
+  verify_remove(
+    remove_resource, false, "stonith_list", "dialog_verify_remove_resources",
+    "fence device", "Remove device(s)", "Fence Device Removal", resource_id
+  );
+}
+
+function verify_remove_acl_roles(role_id) {
+  verify_remove(
+    remove_acl_roles, false, "acls_roles_list", "dialog_verify_remove_acl_roles",
+    "ACL role", "Remove Role(s)", "Remove ACL Role", role_id
+  );
+}
+
+function get_checked_ids_from_nodelist(nodelist_id) {
+  var ids = new Array()
+  $("#" + nodelist_id + " .node_list_check :checked").each(function (index, element) {
+    if($(element).parent().parent().attr("nodeID")) {
+      ids.push($(element).parent().parent().attr("nodeID"));
+    }
+  });
+  return ids;
 }
 
 function remote_node_update() {
@@ -358,9 +442,9 @@ function local_node_update(node, data) {
 
   for (var n in data) {
     if (data[n].pacemaker_online && (jQuery.inArray(n, data[n].pacemaker_online) != -1)) {
-	setNodeStatus(n, true);
+      setNodeStatus(n, true);
     } else {
-    	setNodeStatus(n,false);
+      setNodeStatus(n,false);
     }
   }
 }
@@ -371,69 +455,6 @@ function disable_checkbox_clicks() {
   });
 }
 
-// TODO: REMOVE
-function resource_list_update() {
-  resource = $('#node_info_header_title_name').first().text();
-
-  // If resources are checked we need to keep them selected on refresh
-  var checkedResources = new Array();
-  $('.node_list_check :checked').each(function(i,e) {
-    checkedResources.push($(e).attr("res_id"));
-  });
-
-  $.ajax({
-    type: 'GET',
-    url: '/resource_list/'+resource,
-    timeout: pcs_timeout,
-    success: function(data) {
-      try {
-	newdata = $(data);
-      } catch(err) {
-	newdata = $("");
-      }
-      newdata.find('.node_list_check input[type=checkbox]').each( function(i,e) {
-	var res_id = $(e).attr("res_id");
-	for (var i=checkedResources.length-1; i>= 0; --i) {
-	  if (checkedResources[i] == res_id) {
-	    $(e).prop("checked",true);
-	  }
-	}
-      });
-      
-      $("#node_list").html(newdata);
-      disable_checkbox_clicks();
-      window.setTimeout(resource_list_update, pcs_timeout);
-    },
-    error: function (XMLHttpRequest, textStatus, errorThrown) {
-      window.setTimeout(resource_list_update, 60000);
-    }
-  });
-}
-
-// TODO: REMOVE
-function resource_update() {
-  resource = $('#node_info_header_title_name').first().text();
-  $.ajax({
-    type: 'GET',
-    url: '/remote/resource_status?resource='+resource,
-    timeout: pcs_timeout,
-    success: function(data) {
-      data = jQuery.parseJSON(data);
-      $("#cur_res_loc").html(data.location);
-      $("#res_status").html(data.status);
-      if (data.status == "Running") {
-	setStatus($("#res_status"), 0);
-      } else {
-	setStatus($("#res_status"), 1);
-      }
-      window.setTimeout(resource_update, pcs_timeout);
-    },
-    error: function (XMLHttpRequest, textStatus, errorThrown) {
-      window.setTimeout(resource_update, 60000);
-    }
-  });
-}
-
 // Set the status of a service
 // 0 = Running (green)
 // 1 = Stopped (red)
@@ -471,32 +492,42 @@ function fade_in_out(id) {
   });
 }
 
+function node_link_action(link_selector, url, label) {
+  var node = $.trim($("#node_info_header_title_name").text());
+  fade_in_out(link_selector);
+  $.ajax({
+    type: 'POST',
+    url: url,
+    data: {"name": node},
+    success: function() {
+    },
+    error: function (xhr, status, error) {
+      alert(
+        "Unable to " + label + " node '" + node + "' "
+        + ajax_simple_error(xhr, status, error)
+      );
+    }
+  });
+}
+
 function setup_node_links() {
   Ember.debug("Setup node links");
   $("#node_start").click(function() {
-    node = $("#node_info_header_title_name").text();
-    fade_in_out("#node_start");
-    $.post('/remote/cluster_start',{"name": $.trim(node)});
+    node_link_action("#node_start", "/remote/cluster_start", "start");
   });
   $("#node_stop").click(function() {
-    node = $("#node_info_header_title_name").text();
+    var node = $.trim($("#node_info_header_title_name").text());
     fade_in_out("#node_stop");
-    node_stop($.trim(node), false);
+    node_stop(node, false);
   });
   $("#node_restart").click(function() {
-    node = $("#node_info_header_title_name").text();
-    fade_in_out("#node_restart");
-    $.post('/remote/node_restart', {"name": $.trim(node)});
+    node_link_action("#node_restart", "/remote/node_restart", "restart");
   });
   $("#node_standby").click(function() {
-    node = $("#node_info_header_title_name").text();
-    fade_in_out("#node_standby");
-    $.post('/remote/node_standby', {"name": $.trim(node)});
+    node_link_action("#node_standby", "/remote/node_standby", "standby");
   });
   $("#node_unstandby").click(function() {
-    node = $("#node_info_header_title_name").text();
-    fade_in_out("#node_unstandby");
-    $.post('/remote/node_unstandby', {"name": $.trim(node)});
+    node_link_action("#node_unstandby", "/remote/node_unstandby", "unstandby");
   });
 }
 
@@ -524,8 +555,9 @@ function node_stop(node, force) {
         */
         return;
       }
-      var message = "Unable to stop node '" + node + "' (" + $.trim(error) + ")";
-      message += "\n" + xhr.responseText;
+      var message = "Unable to stop node '" + node + " " + ajax_simple_error(
+        xhr, status, error
+      );
       if (message.indexOf('--force') == -1) {
         alert(message);
       }
@@ -539,37 +571,49 @@ function node_stop(node, force) {
   });
 }
 
-function setup_resource_links(link_type) {
-  Ember.debug("Setup resource links");
-  $("#resource_delete_link").click(function () {
-    verify_remove("resource", null, "Remove resource", "Resource Removal", curResource(), "/resourcerm");
-  });
-  $("#stonith_delete_link").click(function () {
-    verify_remove("stonith", null, "Remove device(s)", "Fence Device Removal", curStonith(), "/fencerm")
-  });
-  $("#resource_stop_link").click(function () {
-    fade_in_out("#resource_stop_link");
-    $.post(get_cluster_remote_url() + 'resource_stop',"resource="+curResource());
-    Pcs.resourcesController.cur_resource.set("disabled",true);
-  });
-  $("#resource_start_link").click(function () {
-    fade_in_out("#resource_start_link");
-    $.post(get_cluster_remote_url() + 'resource_start',"resource="+curResource());
-    Pcs.resourcesController.cur_resource.set("disabled",false);
-  });
-  $("#resource_cleanup_link").click(function () {
-    fade_in_out("#resource_cleanup_link");
-    $.post(get_cluster_remote_url() + 'resource_cleanup',"resource="+curResource());
-  });
-  $("#stonith_cleanup_link").click(function () {
-    fade_in_out("#stonith_cleanup_link");
-    $.post(get_cluster_remote_url() + 'resource_cleanup',"resource="+curResource());
-  });
-  $("#resource_move_link").click(function () {
-    alert("Not Yet Implemented");
+function enable_resource() {
+  fade_in_out("#resource_start_link");
+  Pcs.resourcesContainer.enable_resource(curResource());
+}
+
+function disable_resource() {
+  fade_in_out("#resource_stop_link");
+  Pcs.resourcesContainer.disable_resource(curResource());
+}
+
+function cleanup_resource() {
+  var resource = curResource();
+  fade_in_out("#resource_cleanup_link");
+  $.ajax({
+    type: 'POST',
+    url: get_cluster_remote_url() + 'resource_cleanup',
+    data: {"resource": resource},
+    success: function() {
+    },
+    error: function (xhr, status, error) {
+      alert(
+        "Unable to cleanup resource '" + resource + "' "
+        + ajax_simple_error(xhr, status, error)
+      );
+    }
   });
-  $("#resource_history_link").click(function () {
-    alert("Not Yet Implemented");
+}
+
+function cleanup_stonith() {
+  var resource = curStonith();
+  fade_in_out("#stonith_cleanup_link");
+  $.ajax({
+    type: 'POST',
+    url: get_cluster_remote_url() + 'resource_cleanup',
+    data: {"resource": resource},
+    success: function() {
+    },
+    error: function (xhr, status, error) {
+      alert(
+        "Unable to cleanup resource '" + resource + "' "
+        + ajax_simple_error(xhr, status, error)
+      );
+    }
   });
 }
 
@@ -630,20 +674,166 @@ function checkClusterNodes() {
   });
 }
 
-function add_existing_dialog() {
-  var buttonOpts = {}
+function auth_nodes(dialog) {
+  $("#auth_failed_error_msg").hide();
+  $.ajax({
+    type: 'POST',
+    url: '/remote/auth_gui_against_nodes',
+    data: dialog.find("#auth_nodes_form").serialize(),
+    timeout: pcs_timeout,
+    success: function (data) {
+      mydata = jQuery.parseJSON(data);
+      auth_nodes_dialog_update(dialog, mydata);
+    },
+    error: function (XMLHttpRequest, textStatus, errorThrown) {
+      alert("ERROR: Unable to contact server");
+    }
+  });
+}
 
-  buttonOpts["Add Existing"] = function() {
-          checkExistingNode();
-  };
+function auth_nodes_dialog_update(dialog_obj, data) {
+  var unauth_nodes = [];
+  var node;
+  if (data['node_auth_error']) {
+    for (node in data['node_auth_error']) {
+      if (data['node_auth_error'][node] != 0) {
+        unauth_nodes.push(node);
+      }
+    }
+  }
 
-  buttonOpts["Cancel"] = function() {
-    $(this).dialog("close");
-  };
+  var callback_one = dialog_obj.dialog("option", "callback_success_one_");
+  var callback = dialog_obj.dialog("option", "callback_success_");
+  if (unauth_nodes.length == 0) {
+    dialog_obj.parent().find("#authenticate_submit_btn").button(
+      "option", "disabled", false
+    );
+    dialog_obj.find("#auth_failed_error_msg").hide();
+    dialog_obj.dialog("close");
+    if (callback_one !== null)
+      callback_one();
+    if (callback !== null)
+      callback();
+    return unauth_nodes;
+  } else {
+    dialog_obj.find("#auth_failed_error_msg").show();
+  }
+
+  if (unauth_nodes.length == 1) {
+    dialog_obj.find("#same_pass").hide();
+    dialog_obj.find('#auth_nodes_list').find('input:password').each(
+      function(){$(this).show()}
+    );
+  }
+
+  var one_success = false;
+  dialog_obj.find("input:password[name$=-pass]").each(function() {
+    node = $(this).attr("name");
+    node = node.substring(0, node.length - 5);
+    if (unauth_nodes.indexOf(node) == -1) {
+      $(this).parent().parent().remove();
+      one_success = true;
+    } else {
+      $(this).parent().parent().css("color", "red");
+    }
+  });
+
+  if (one_success && callback_one !== null)
+    callback_one();
+
+  dialog_obj.parent().find("#authenticate_submit_btn").button(
+    "option", "disabled", false
+  );
+  return unauth_nodes;
+}
+
+function auth_nodes_dialog(unauth_nodes, callback_success, callback_success_one) {
+  callback_success = typeof callback_success !== 'undefined' ? callback_success : null;
+  callback_success_one = typeof callback_success_one !== 'undefined' ? callback_success_one : null;
+
+  var buttonsOpts = [
+    {
+      text: "Authenticate",
+      id: "authenticate_submit_btn",
+      click: function() {
+        var dialog = $(this);
+        dialog.parent().find("#authenticate_submit_btn").button(
+          "option", "disabled", true
+        );
+        dialog.find("table.err_msg_table").find("span[id$=_error_msg]").hide();
+        auth_nodes(dialog);
+      }
+    },
+    {
+      text:"Cancel",
+      click: function () {
+        $(this).dialog("close");
+      }
+    }
+  ];
+  var dialog_obj = $("#auth_nodes").dialog({title: 'Authentification of nodes',
+    modal: true, resizable: false,
+    width: 'auto',
+    buttons: buttonsOpts,
+    callback_success_: callback_success,
+    callback_success_one_: callback_success_one
+  });
+
+  dialog_obj.find("#auth_failed_error_msg").hide();
+
+  // If you hit enter it triggers the submit button
+  dialog_obj.keypress(function(e) {
+    if (e.keyCode == $.ui.keyCode.ENTER && !dialog_obj.parent().find("#authenticate_submit_btn").button("option", "disabled")) {
+      dialog_obj.parent().find("#authenticate_submit_btn").trigger("click");
+      return false;
+    }
+  });
+
+  if (unauth_nodes.length == 0) {
+    if (callback_success !== null) {
+      callback_success();
+    }
+    return;
+  }
+
+  if (unauth_nodes.length == 1) {
+    dialog_obj.find("#same_pass").hide();
+  } else {
+    dialog_obj.find("#same_pass").show();
+    dialog_obj.find("input:checkbox[name=all]").prop("checked", false);
+    dialog_obj.find("#pass_for_all").val("");
+    dialog_obj.find("#pass_for_all").hide();
+  }
+
+  dialog_obj.find('#auth_nodes_list').empty();
+  unauth_nodes.forEach(function(node) {
+    dialog_obj.find('#auth_nodes_list').append("\t\t\t<tr><td>" + node + '</td><td><input type="password" name="' + node + '-pass"></td></tr>\n');
+  });
+
+}
+
+function add_existing_dialog() {
+  var buttonOpts = [
+    {
+      text: "Add Existing",
+      id: "add_existing_submit_btn",
+      click: function () {
+        $("#add_existing_cluster").find("table.err_msg_table").find("span[id$=_error_msg]").hide();
+        $("#add_existing_submit_btn").button("option", "disabled", true);
+        checkExistingNode();
+      }
+    },
+    {
+      text: "Cancel",
+      click: function() {
+        $(this).dialog("close");
+      }
+    }
+  ];
 
   // If you hit enter it triggers the first button: Add Existing
   $('#add_existing_cluster').keypress(function(e) {
-    if (e.keyCode == $.ui.keyCode.ENTER) {
+    if (e.keyCode == $.ui.keyCode.ENTER && !$("#add_existing_submit_btn").button("option", "disabled")) {
       $(this).parent().find("button:eq(1)").trigger("click");
       return false;
     }
@@ -660,8 +850,10 @@ function update_existing_cluster_dialog(data) {
   for (var i in data) {
     if (data[i] == "Online") {
       $('#add_existing_cluster_form').submit();
-      $('#add_existing_cluster_error_msg').hide();
-      $('#unable_to_connect_error_msg_ae').hide();
+      return;
+    } else if (data[i] == "Unable to authenticate") {
+      auth_nodes_dialog([i], function() {$("#add_existing_submit_btn").trigger("click");});
+      $("#add_existing_submit_btn").button("option", "disabled", false);
       return;
     }
     break;
@@ -671,6 +863,7 @@ function update_existing_cluster_dialog(data) {
     $('#add_existing_cluster_error_msg').show();
   }
   $('#unable_to_connect_error_msg_ae').show();
+  $("#add_existing_submit_btn").button("option", "disabled", false);
 }
 
 function update_create_cluster_dialog(nodes, version_info) {
@@ -682,7 +875,7 @@ function update_create_cluster_dialog(nodes, version_info) {
   }
 
   var cant_connect_nodes = 0;
-  var cant_auth_nodes = 0;
+  var cant_auth_nodes = [];
   var good_nodes = 0;
   var addr1_match = 1;
   var ring0_nodes = [];
@@ -696,27 +889,32 @@ function update_create_cluster_dialog(nodes, version_info) {
 
     $('#create_new_cluster input[name^="node-"]').each(function() {
       if ($(this).val() == "") {
-	$(this).parent().prev().css("background-color", "");
-	return;
+        $(this).parent().prev().css("background-color", "");
+        return;
       }
       for (var i = 0; i < keys.length; i++) {
-	if ($(this).val() == keys[i]) {
-	  if (nodes[keys[i]] != "Online") {
-	    if (nodes[keys[i]] == "Unable to authenticate") {
-	      $(this).parent().prev().css("background-color", "orange");
-	      cant_auth_nodes++;
-	    } else {
-	      $(this).parent().prev().css("background-color", "red");
-	      cant_connect_nodes++;
-	    }
-	  } else {
-	    $(this).parent().prev().css("background-color", "");
-	    good_nodes++;
-	  }
-	}
+        if ($(this).val() == keys[i]) {
+          if (nodes[keys[i]] != "Online") {
+            if (nodes[keys[i]] == "Unable to authenticate") {
+              cant_auth_nodes.push(keys[i]);
+            } else {
+              $(this).parent().prev().css("background-color", "red");
+              cant_connect_nodes++;
+            }
+          } else {
+            $(this).parent().prev().css("background-color", "");
+            good_nodes++;
+          }
+        }
       }
     });
 
+    if (cant_auth_nodes.length > 0) {
+      auth_nodes_dialog(cant_auth_nodes, function(){$("#create_cluster_submit_btn").trigger("click")});
+      $("#create_cluster_submit_btn").button("option", "disabled", false);
+      return;
+    }
+
   if (transport == "udpu") {
     $('#create_new_cluster input[name^="node-"]').each(function() {
       if ($(this).val().trim() != "") {
@@ -763,13 +961,13 @@ function update_create_cluster_dialog(nodes, version_info) {
     });
   }
 
-  if (cant_connect_nodes != 0 || cant_auth_nodes != 0) {
+  if (cant_connect_nodes != 0) {
     $("#unable_to_connect_error_msg").show();
   } else {
     $("#unable_to_connect_error_msg").hide();
   }
 
-  if (good_nodes == 0 && cant_connect_nodes == 0 && cant_auth_nodes == 0) {
+  if (good_nodes == 0 && cant_connect_nodes == 0) {
     $("#at_least_one_node_error_msg").show();
   } else {
     $("#at_least_one_node_error_msg").hide();
@@ -826,8 +1024,10 @@ function update_create_cluster_dialog(nodes, version_info) {
     $("#rhel_version_mismatch_error_msg").hide();
   }
 
-  if (good_nodes != 0 && cant_connect_nodes == 0 && cant_auth_nodes == 0 && cluster_name != "" && addr1_match == 1 && versions_check_ok == 1) {
+  if (good_nodes != 0 && cant_connect_nodes == 0 && cant_auth_nodes.length == 0 && cluster_name != "" && addr1_match == 1 && versions_check_ok == 1) {
     $('#create_new_cluster_form').submit();
+  } else {
+    $("#create_cluster_submit_btn").button("option", "disabled", false);
   }
 
 }
@@ -837,6 +1037,8 @@ function create_cluster_dialog() {
     text: "Create Cluster",
     id: "create_cluster_submit_btn",
     click: function() {
+      $("#create_new_cluster").find("table.err_msg_table").find("span[id$=_error_msg]").hide();
+      $("#create_cluster_submit_btn").button("option", "disabled", true);
       checkClusterNodes();
     }
   },
@@ -913,19 +1115,8 @@ function hover_out(o) {
 }
 
 function reload_current_resource() {
-  load_row_by_id(Pcs.resourcesController.cur_resource.name);
-}
-
-function load_row_by_id(resource_id) {
-  row = $("[nodeid='"+resource_id+"']");
-  if (row.parents("#resource_list").length != 0) {
-    load_agent_form(row, false);
-    load_row(row, Pcs.resourcesController, 'cur_resource', '#resource_info_div', 'cur_resource_res');
-  } else if (row.parents("#stonith_list").length != 0) {
-    load_agent_form(row, true);
-    load_row(row, Pcs.resourcesController, 'cur_resource', "#stonith_info_div", 'cur_resource_ston');
-  } else
-    alert("Unable to make " + resource_id + " active, doesn't appear to be resource or stonith");
+  tree_view_onclick(curResource(), true);
+  tree_view_onclick(curStonith(), true);
 }
 
 function load_row(node_row, ac, cur_elem, containing_elem, also_set, initial_load){
@@ -938,50 +1129,51 @@ function load_row(node_row, ac, cur_elem, containing_elem, also_set, initial_loa
     node_name = $(node_row).attr("nodeID");
     $.each(self.content, function(key, node) {
       if (node.name == node_name) {
-	if (!initial_load) {self.set(cur_elem,node);}
-	node.set(cur_elem, true);
-	if (also_set)
-	  self.set(also_set, node);
+        if (!initial_load) {
+          self.set(cur_elem,node);
+        }
+        node.set(cur_elem, true);
+        if (also_set)
+          self.set(also_set, node);
       } else {
-	if (self.cur_resource_ston &&
-	    self.cur_resource_ston.name == node.name)
-	  self.content[key].set(cur_elem,true);
-	else if (self.cur_resource_res &&
-		 self.cur_resource_res.name == node.name)
-	  self.content[key].set(cur_elem,true);
-	else
-	  self.content[key].set(cur_elem,false);
+        if (self.cur_resource_ston && self.cur_resource_ston.name == node.name)
+          self.content[key].set(cur_elem,true);
+        else if (self.cur_resource_res && self.cur_resource_res.name == node.name)
+          self.content[key].set(cur_elem,true);
+        else
+          self.content[key].set(cur_elem,false);
       }
     });
-    Pcs.resourcesController.update_cur_resource();
     $(containing_elem).fadeTo(500,1);
   });
 }
 
-function load_agent_form(resource_row, stonith) {
-  resource_name = $(resource_row).attr("nodeID");
+function load_agent_form(resource_id, stonith) {
   var url;
   var form;
-  var data = {resource: resource_name};
   if (stonith) {
     form = $("#stonith_agent_form");
     url = '/managec/' + Pcs.cluster_name + '/fence_device_form';
   } else {
     form = $("#resource_agent_form");
-    url = '/managec/' + Pcs.cluster_name + '/resource_form';
+    url = '/managec/' + Pcs.cluster_name + '/resource_form?version=2';
   }
 
   form.empty();
 
+  var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
+  if (!resource_obj || !resource_obj.get('is_primitive'))
+    return;
+
+  var data = {resource: resource_id};
+
   $.ajax({
     type: 'GET',
     url: url,
     data: data,
     timeout: pcs_timeout,
     success: function (data) {
-      form.html(data);
-      disable_spaces(form);
-      myform = form;
+      Ember.run.next(function(){form.html(data);});
     }
   });
 }
@@ -1003,34 +1195,42 @@ function show_loading_screen() {
 
 function hide_loading_screen() {
   $("#loading_screen").dialog('close');
+  destroy_tooltips();
+}
+
+function destroy_tooltips() {
+  $("div[id^=ui-tooltip-]").remove();
 }
 
 function remove_cluster(ids) {
-  for (var i=0; i<ids.length; i++) {
-    var cluster = ids[i];
-    var clusterid_name = "clusterid-"+ids[i];
-    var data = {}
-    data[clusterid_name] = true;
-    $.ajax({
-      type: 'POST',
-      url: '/manage/removecluster',
-      data: data,
-      timeout: pcs_timeout,
-      success: function () {
-      	location.reload();
-      },
-      error: function (xhr, status, error) {
-	alert("Unable to remove resource: " + res + " ("+error+")");
-      }
-    });
-  }
+  var data = {};
+  $.each(ids, function(_, cluster) {
+    data[ "clusterid-" + cluster] = true;
+  });
+  $.ajax({
+    type: 'POST',
+    url: '/manage/removecluster',
+    data: data,
+    timeout: pcs_timeout,
+    success: function () {
+      $("#dialog_verify_remove_clusters.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
+      location.reload();
+    },
+    error: function (xhr, status, error) {
+      alert("Unable to remove cluster: " + res + " ("+error+")");
+      $("#dialog_verify_remove_clusters.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
+    }
+  });
 }
 
-function remove_nodes(ids) {
+function remove_nodes(ids, force) {
   var data = {};
   for (var i=0; i<ids.length; i++) {
     data["nodename-"+i] = ids[i];
   }
+  if (force) {
+    data["force"] = force;
+  }
 
   $.ajax({
     type: 'POST',
@@ -1038,22 +1238,47 @@ function remove_nodes(ids) {
     data: data,
     timeout: pcs_timeout*3,
     success: function(data,textStatus) {
-      $("#remove_node").dialog("close");
+      $("#dialog_verify_remove_nodes.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
       if (data == "No More Nodes") {
-	window.location.href = "/manage";
-      }	else {
-	Pcs.update();
+        window.location.href = "/manage";
+      } else {
+        Pcs.update();
       }
     },
     error: function (xhr, status, error) {
-      $("#remove_node").dialog("close");
-      alert("Unable to remove nodes: " + res + " ("+error+")");
+      $("#dialog_verify_remove_nodes.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
+      if ((status == "timeout") || ($.trim(error) == "timeout")) {
+        /*
+         We are not interested in timeout because:
+         - it can take minutes to stop a node (resources running on it have
+           to be stopped/moved and we do not need to wait for that)
+         - if pcs is not able to stop a node it returns a (forceable) error
+           immediately
+        */
+        return;
+      }
+      var message = "Unable to remove nodes (" + $.trim(error) + ")";
+      message += "\n" + xhr.responseText;
+      if (message.indexOf('--force') == -1) {
+        alert(message);
+      }
+      else {
+        message = message.replace(', use --force to override', '');
+        if (confirm(message + "\n\nDo you want to force the operation?")) {
+          remove_nodes(ids, true);
+        }
+      }
     }
   });
 }
 
-function remove_resource(ids) {
-  var data = {};
+function remove_resource(ids, force) {
+  var data = {
+    no_error_if_not_exists: true
+  };
+  if (force) {
+    data["force"] = force;
+  }
   var res = "";
   for (var i=0; i<ids.length; i++) {
     res += ids[i] + ", ";
@@ -1068,10 +1293,24 @@ function remove_resource(ids) {
     data: data,
     timeout: pcs_timeout*3,
     success: function () {
+      $("#dialog_verify_remove_resources.ui-dialog-content").each(function(key, item) {$(item).dialog("destroy")});
+      $("#dialog_verify_remove_resources input[name=force]").attr("checked", false);
       Pcs.update();
     },
     error: function (xhr, status, error) {
-      alert("Unable to remove resources: " + res + " ("+error+")");
+      error = $.trim(error)
+      var message = "Unable to remove resources (" + error + ")";
+      if (
+        (xhr.responseText.substring(0,6) == "Error:") || ("Forbidden" == error)
+      ) {
+        message += "\n\n" + xhr.responseText.replace("--force", "'Enforce removal'");
+      }
+      alert(message);
+      $("#dialog_verify_remove_resources.ui-dialog-content").each(
+        function(key, item) { $(item).dialog("destroy"); }
+      );
+      $("#dialog_verify_remove_resources input[name=force]").attr("checked", false);
+      Pcs.update();
     }
   });
 }
@@ -1096,16 +1335,29 @@ function add_remove_fence_level(parent_id,remove) {
     timeout: pcs_timeout,
     success: function() {
 //      Pcs.nodesController.remove_fence_level();
-      if (!remove)
-	$(parent_id.parent()).find("input").val("");
-	$(parent_id.parent()).find("select").val("");
+      if (!remove) {
+        $(parent_id.parent()).find("input").val("");
+        $(parent_id.parent()).find("select").val("");
+      }
       Pcs.update();
     },
     error: function (xhr, status, error) {
-      if (remove)
-        alert("Unable to remove fence level: ("+xhr.responseText+")");
-      else
-        alert("Unable to add fence level: ("+xhr.responseText+")");
+      if (remove) {
+        alert(
+          "Unable to remove fence level "
+          + ajax_simple_error(xhr, status, error)
+        );
+      }
+      else {
+        if (xhr.responseText.substring(0,6) == "Error:") {
+          alert(xhr.responseText);
+        } else {
+          alert(
+            "Unable to add fence level "
+            + ajax_simple_error(xhr, status, error)
+          );
+        }
+      }
     }
   });
 }
@@ -1127,7 +1379,10 @@ function remove_node_attr(parent_id) {
       Pcs.update();
     },
     error: function (xhr, status, error) {
-      alert("Unable to add meta attribute: ("+error+")");
+      alert(
+        "Unable to remove node attribute "
+        + ajax_simple_error(xhr, status, error)
+      );
     }
   });
 }
@@ -1150,59 +1405,80 @@ function add_node_attr(parent_id) {
       Pcs.update();
     },
     error: function (xhr, status, error) {
-      alert("Unable to add node attribute: ("+error+")");
+      alert(
+        "Unable to add node attribute "
+        + ajax_simple_error(xhr, status, error)
+      );
     }
   });
 }
 
-function remove_meta_attr(parent_id) {
-  var data = {};
-  data["res_id"] = parent_id.attr("meta_attr_res");
-  data["key"] = parent_id.attr("meta_attr_key");
-  data["value"] = "";
-  fade_in_out(parent_id.parent());
-
+function node_maintenance(node) {
+  var data = {
+    node: node,
+    key: "maintenance",
+    value: "on"
+  };
   $.ajax({
     type: 'POST',
-    url: get_cluster_remote_url() + 'add_meta_attr_remote',
+    url: get_cluster_remote_url() + 'add_node_attr_remote',
     data: data,
     timeout: pcs_timeout,
-    success: function() {
-      Pcs.resourcesController.add_meta_attr(data["res_id"], data["key"], data["value"]);
-      Pcs.update();
-    },
     error: function (xhr, status, error) {
-      alert("Unable to add meta attribute: ("+error+")");
+      alert(
+        "Unable to put node '" + node + "' to maintenance mode. "
+        + ajax_simple_error(xhr, status, error)
+      );
+    },
+    complete: function() {
+      Pcs.update();
     }
   });
 }
 
-function add_meta_attr(parent_id) {
-  var data = {};
-  data["res_id"] = Pcs.resourcesController.cur_resource.name
-  data["key"] = $(parent_id + " input[name='new_meta_key']").val();
-  data["value"] = $(parent_id + " input[name='new_meta_value']").val();
-  fade_in_out($(parent_id));
-
+function node_unmaintenance(node) {
+  var data = {
+    node: node,
+    key: "maintenance",
+    value: ""
+  };
   $.ajax({
     type: 'POST',
-    url: get_cluster_remote_url() + 'add_meta_attr_remote',
+    url: get_cluster_remote_url() + 'add_node_attr_remote',
     data: data,
     timeout: pcs_timeout,
-    success: function() {
-      $(parent_id + " input").val("");
-      Pcs.resourcesController.add_meta_attr(data["res_id"], data["key"], data["value"]);
-      Pcs.update();
-    },
     error: function (xhr, status, error) {
-      alert("Unable to add meta attribute: ("+error+")");
+      alert(
+        "Unable to remove node '" + node + "' from maintenance mode. "
+        + ajax_simple_error(xhr, status, error)
+      );
+    },
+    complete: function() {
+      Pcs.update();
     }
   });
 }
 
+function remove_meta_attr(parent_id) {
+  var resource_id = curResource();
+  var attr = parent_id.attr("meta_attr_key");
+  fade_in_out(parent_id.parent());
+  Pcs.resourcesContainer.update_meta_attr(resource_id, attr);
+}
+
+function add_meta_attr(parent_id) {
+  var resource_id = curResource();
+  var attr = $(parent_id + " input[name='new_meta_key']").val();
+  var value = $(parent_id + " input[name='new_meta_value']").val();
+  fade_in_out($(parent_id));
+  $(parent_id + " input").val("");
+  Pcs.resourcesContainer.update_meta_attr(resource_id, attr, value);
+}
+
 function add_constraint(parent_id, c_type, force) {
   var data = {};
-  data["res_id"] = Pcs.resourcesController.cur_resource.name
+  data["disable_autocorrect"] = true;
+  data["res_id"] = Pcs.resourcesContainer.cur_resource.get('id');
   data["node_id"] = $(parent_id + " input[name='node_id']").val();
   data["rule"] = $(parent_id + " input[name='node_id']").val();
   data["score"] = $(parent_id + " input[name='score']").val();
@@ -1217,7 +1493,7 @@ function add_constraint(parent_id, c_type, force) {
   }
   fade_in_out($(parent_id));
 
-  $.ajax({ 
+  $.ajax({
     type: 'POST',
     url: get_cluster_remote_url() + (
       data['node_id'] && (data['node_id'].trim().indexOf(' ') != -1)
@@ -1228,36 +1504,26 @@ function add_constraint(parent_id, c_type, force) {
     timeout: pcs_timeout,
     success: function() {
       $(parent_id + " input").val("");
-      if (c_type == "loc")
-	Pcs.resourcesController.add_loc_constraint(data["res_id"],"temp-cons-id",
-						   data["node_id"], data["score"]);
-      else if (c_type == "ord")
-        Pcs.resourcesController.add_ord_constraint(
-          data["res_id"], "temp-cons-id", data["target_res_id"],
-          data['res_action'], data['target_action'], data["order"],
-          data["score"]
-        );
-      else if (c_type == "col")
-	Pcs.resourcesController.add_col_constraint(data["res_id"],"temp-cons-id",
-						   data["target_res_id"],
-						   data["colocation_type"], data["score"]);
       Pcs.update();
     },
     error: function (xhr, status, error) {
-      var message = "Unable to add constraints: (" + error + ")";
+      var message = "Unable to add constraint (" + $.trim(error) + ")";
       var error_prefix = 'Error adding constraint: ';
-      if (
-        xhr.responseText.indexOf(error_prefix) == 0
-        &&
-        xhr.responseText.indexOf('cib_replace failed') == -1
-      ) {
-        message += "\n" + xhr.responseText.slice(error_prefix.length);
+      if (xhr.responseText.indexOf('cib_replace failed') == -1) {
+        if (xhr.responseText.indexOf(error_prefix) == 0) {
+          message += "\n\n" + xhr.responseText.slice(error_prefix.length);
+        }
+        else {
+          message += "\n\n" + xhr.responseText;
+        }
       }
       if (message.indexOf('--force') == -1) {
         alert(message);
+        Pcs.update();
       }
       else {
         message = message.replace(', use --force to override', '');
+        message = message.replace('Use --force to override.', '');
         if (confirm(message + "\n\nDo you want to force the operation?")) {
           add_constraint(parent_id, c_type, true);
         }
@@ -1267,7 +1533,10 @@ function add_constraint(parent_id, c_type, force) {
 }
 
 function add_constraint_set(parent_id, c_type, force) {
-  var data = {'resources': []};
+  var data = {
+    resources: [],
+    disable_autocorrect: true
+  };
   $(parent_id + " input[name='resource_ids[]']").each(function(index, element) {
     var resources = element.value.trim();
     if (resources.length > 0) {
@@ -1290,28 +1559,26 @@ function add_constraint_set(parent_id, c_type, force) {
     timeout: pcs_timeout,
     success: function() {
       reset_constraint_set_form(parent_id);
-      if (c_type == "ord") {
-        Pcs.resourcesController.add_ord_set_constraint(
-          data["resources"], "temp-cons-id", "temp-cons-set-id"
-        );
-      }
       Pcs.update();
     },
     error: function (xhr, status, error){
-      var message = "Unable to add constraints: (" + error + ")";
+      var message = "Unable to add constraint (" + $.trim(error) + ")";
       var error_prefix = 'Error adding constraint: ';
-      if (
-        xhr.responseText.indexOf(error_prefix) == 0
-        &&
-        xhr.responseText.indexOf('cib_replace failed') == -1
-      ) {
-        message += "\n" + xhr.responseText.slice(error_prefix.length);
+      if (xhr.responseText.indexOf('cib_replace failed') == -1) {
+        if (xhr.responseText.indexOf(error_prefix) == 0) {
+          message += "\n\n" + xhr.responseText.slice(error_prefix.length);
+        }
+        else {
+          message += "\n\n" + xhr.responseText;
+        }
       }
       if (message.indexOf('--force') == -1) {
         alert(message);
+        Pcs.update();
       }
       else {
         message = message.replace(', use --force to override', '');
+        message = message.replace('Use --force to override.', '');
         if (confirm(message + "\n\nDo you want to force the operation?")) {
           add_constraint_set(parent_id, c_type, true);
         }
@@ -1339,11 +1606,14 @@ function remove_constraint(id) {
     url: get_cluster_remote_url() + 'remove_constraint_remote',
     data: {"constraint_id": id},
     timeout: pcs_timeout,
-    success: function (data) {
-      Pcs.resourcesController.remove_constraint(id);
-    },
     error: function (xhr, status, error) {
-      alert("Error removing constraint: ("+error+")");
+      alert(
+        "Error removing constraint "
+        + ajax_simple_error(xhr, status, error)
+      );
+    },
+    complete: function() {
+      Pcs.update();
     }
   });
 }
@@ -1355,11 +1625,14 @@ function remove_constraint_rule(id) {
     url: get_cluster_remote_url() + 'remove_constraint_rule_remote',
     data: {"rule_id": id},
     timeout: pcs_timeout,
-    success: function (data) {
-      Pcs.resourcesController.remove_constraint(id);
-    },
     error: function (xhr, status, error) {
-      alert("Error removing constraint rule: ("+error+")");
+      alert(
+        "Error removing constraint rule "
+        + ajax_simple_error(xhr, status, error)
+      );
+    },
+    complete: function() {
+      Pcs.update();
     }
   });
 }
@@ -1378,41 +1651,14 @@ function add_acl_role(form) {
       $("#add_acl_role").dialog("close");
     },
     error: function(xhr, status, error) {
-      alert(xhr.responseText);
+      alert(
+        "Error adding ACL role "
+        + ajax_simple_error(xhr, status, error)
+      );
     }
   });
 }
 
-function verify_acl_role_remove() {
-  var buttonOpts = {};
-  var ids = [];
-  var html_roles = "";
-  $.each($('.node_list_check :checked'), function (i,e) {
-    var role_id = $(e).parent().parent().attr("nodeID");
-    ids.push(role_id);
-    html_roles += "<li>" + role_id + "</li>";
-  });
-  if (ids.length == 0) {
-    alert("You must select at least one role to remove");
-    return;
-  }
-  buttonOpts["Remove Role(s)"] = function() {
-    if (ids.length > 0) {
-      remove_acl_roles(ids);
-    }
-  }
-  buttonOpts["Cancel"] = function() {
-    $(this).dialog("close");
-  };
-  $("#roles_to_remove").html("<ul>" + html_roles + "<ul>");
-  $("#remove_acl_roles").dialog({
-    title: "Remove ACL Role",
-    modal: true,
-    resizable: false,
-    buttons: buttonOpts
-  });
-}
-
 function remove_acl_roles(ids) {
   var data = {};
   for (var i = 0; i < ids.length; i++) {
@@ -1424,12 +1670,19 @@ function remove_acl_roles(ids) {
     data: data,
     timeout: pcs_timeout*3,
     success: function(data,textStatus) {
-      $("#remove_acl_roles").dialog("close");
+      $("#dialog_verify_remove_acl_roles.ui-dialog-content").each(
+        function(key, item) { $(item).dialog("destroy"); }
+      );
       Pcs.update();
     },
     error: function (xhr, status, error) {
-      $("#remove_acl_roles").dialog("close");
-      alert(xhr.responseText);
+      alert(
+        "Error removing ACL role "
+        + ajax_simple_error(xhr, status, error)
+      );
+      $("#dialog_verify_remove_acl_roles.ui-dialog-content").each(
+        function(key, item) { $(item).dialog("destroy"); }
+      );
     }
   });
 }
@@ -1437,17 +1690,20 @@ function remove_acl_roles(ids) {
 function add_acl_item(parent_id, item_type) {
   var data = {};
   data["role_id"] = Pcs.aclsController.cur_role.name;
+  var item_label = "";
   switch (item_type) {
     case "perm":
       data["item"] = "permission";
       data["type"] = $(parent_id + " select[name='role_type']").val();
       data["xpath_id"] = $(parent_id + " select[name='role_xpath_id']").val();
       data["query_id"] = $(parent_id + " input[name='role_query_id']").val().trim();
+      item_label = "permission"
       break;
     case "user":
     case "group":
       data["item"] = item_type;
       data["usergroup"] = $(parent_id + " input[name='role_assign_user']").val().trim();
+      item_label = item_type
       break;
   }
   fade_in_out($(parent_id));
@@ -1461,7 +1717,10 @@ function add_acl_item(parent_id, item_type) {
       Pcs.update();
     },
     error: function (xhr, status, error) {
-      alert(xhr.responseText);
+      alert(
+        "Error adding " + item_label + " "
+        + ajax_simple_error(xhr, status, error)
+      );
     }
   });
 }
@@ -1469,15 +1728,18 @@ function add_acl_item(parent_id, item_type) {
 function remove_acl_item(id,item) {
   fade_in_out(id);
   var data = {};
+  var item_label = "";
   switch (item) {
     case "perm":
       data["item"] = "permission";
       data["acl_perm_id"] = id.attr("acl_perm_id");
+      item_label = "permission"
       break;
     case "usergroup":
       data["item"] = "usergroup";
       data["usergroup_id"] = id.attr("usergroup_id")
       data["role_id"] = id.attr("role_id")
+      item_label = "user / group"
       break;
   }
 
@@ -1488,26 +1750,16 @@ function remove_acl_item(id,item) {
     timeout: pcs_timeout,
     success: function (data) {
       Pcs.update();
-//      Pcs.resourcesController.remove_constraint(id);
     },
     error: function (xhr, status, error) {
-      alert(xhr.responseText);
+      alert(
+        "Error removing " + item_label + " "
+        + ajax_simple_error(xhr, status, error)
+      );
     }
   });
 }
 
-function show_cluster_info(row) {
-  cluster_name = $(row).attr("nodeID");
-
-  $("#node_sub_info").children().each(function (i, val) {
-    if ($(val).attr("id") == ("cluster_info_" + cluster_name))
-      $(val).show();
-    else
-      $(val).hide();
-  });
-
-}
-
 function update_cluster_settings(form) {
   var data = form.serialize();
   $('html, body, form, :input, :submit').css("cursor","wait");
@@ -1520,7 +1772,10 @@ function update_cluster_settings(form) {
       window.location.reload();
     },
     error: function (xhr, status, error) {
-      alert("Error updating configuration: ("+error+")");
+      alert(
+        "Error updating configuration "
+        + ajax_simple_error(xhr, status, error)
+      );
       $('html, body, form, :input, :submit').css("cursor","auto");
     }
   });
@@ -1528,7 +1783,7 @@ function update_cluster_settings(form) {
 
 // Pull currently managed cluster name out of URL
 function get_cluster_name() {
-  cluster_name = location.pathname.match("/managec/(.*)/");
+  var cluster_name = location.pathname.match("/managec/(.*)/");
   if (cluster_name && cluster_name.length >= 2) {
     Ember.debug("Cluster Name: " + cluster_name[1]);
     cluster_name = cluster_name[1];
@@ -1539,8 +1794,9 @@ function get_cluster_name() {
   return cluster_name;
 }
 
-function get_cluster_remote_url() {
-    return '/managec/' + Pcs.cluster_name + "/";
+function get_cluster_remote_url(cluster_name) {
+  cluster_name = typeof cluster_name !== 'undefined' ? cluster_name : Pcs.cluster_name;
+  return '/managec/' + cluster_name + "/";
 }
 
 function checkBoxToggle(cb,nodes) {
@@ -1550,9 +1806,9 @@ function checkBoxToggle(cb,nodes) {
     cbs = $(cb).closest("tr").parent().find(".node_list_check input[type=checkbox]")
   }
   if ($(cb).prop('checked'))
-    cbs.prop('checked',true);
+    cbs.prop('checked',true).change();
   else
-    cbs.prop('checked',false);
+    cbs.prop('checked',false).change();
 }
 
 function loadWizard(item) {
@@ -1592,3 +1848,655 @@ function setup_resource_class_provider_selection() {
   });
   $("#resource_class_provider_selector").change();
 }
+
+function get_status_value(status) {
+  var values = {
+    failed: 1,
+    error: 1,
+    offline: 1,
+    blocked: 1,
+    warning: 2,
+    standby: 2,
+    maintenance: 2,
+    "partially running": 2,
+    disabled: 3,
+    unknown: 4,
+    ok: 5,
+    running: 5,
+    online: 5
+  };
+  return ((values.hasOwnProperty(status)) ? values[status] : -1);
+}
+
+function status_comparator(a,b) {
+  var valA = get_status_value(a);
+  var valB = get_status_value(b);
+  if (valA == -1) return 1;
+  if (valB == -1) return -1;
+  return valA - valB;
+}
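
Taken together, get_status_value() and status_comparator() order status strings by severity: lower values sort first and any status missing from the table (value -1) is pushed to the end. A minimal illustration of the resulting order, assuming only the two functions above:

  // Illustrative only: sorting a few of the known status strings.
  var statuses = ["running", "unknown", "failed", "warning"];
  statuses.sort(status_comparator);
  // -> ["failed", "warning", "unknown", "running"]
  //    (failed=1, warning=2, unknown=4, running=5)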
+
+function get_status_icon_class(status_val) {
+  switch (status_val) {
+    case get_status_value("error"):
+      return "error";
+    case get_status_value("disabled"):
+    case get_status_value("warning"):
+      return "warning";
+    case get_status_value("ok"):
+      return "check";
+    default:
+      return "x";
+  }
+}
+
+function get_status_color(status_val) {
+  if (status_val == get_status_value("ok")) {
+    return "green";
+  }
+  else if (status_val == get_status_value("warning") || status_val == get_status_value("unknown") || status_val == get_status_value('disabled')) {
+    return "orange";
+  }
+  return "red";
+}
+
+function show_hide_dashboard(element, type) {
+  var cluster = Pcs.clusterController.cur_cluster;
+  if (Pcs.clusterController.get("show_all_" + type)) { // show only failed
+    Pcs.clusterController.set("show_all_" + type, false);
+  } else { // show all
+    Pcs.clusterController.set("show_all_" + type, true);
+  }
+  correct_visibility_dashboard_type(cluster, type);
+}
+
+function correct_visibility_dashboard(cluster) {
+  if (cluster == null)
+    return;
+  $.each(["nodes", "resources", "fence"], function(key, type) {
+    correct_visibility_dashboard_type(cluster, type);
+  });
+}
+
+function correct_visibility_dashboard_type(cluster, type) {
+  if (cluster == null) {
+    return;
+  }
+  destroy_tooltips();
+  var listTable = $("#cluster_info_" + cluster.name).find("table." + type + "_list");
+  var datatable = listTable.find("table.datatable");
+  if (Pcs.clusterController.get("show_all_" + type)) {
+    listTable.find("span.downarrow").show();
+    listTable.find("span.rightarrow").hide();
+    datatable.find("tr.default-hidden").removeClass("hidden");
+  } else {
+    listTable.find("span.downarrow").hide();
+    listTable.find("span.rightarrow").show();
+    datatable.find("tr.default-hidden").addClass("hidden");
+  }
+  if (cluster.get(type + "_failed") == 0 && !Pcs.clusterController.get("show_all_" + type)) {
+    datatable.hide();
+  } else {
+    datatable.show();
+  }
+}
+
+function get_formated_html_list(data) {
+  if (data == null || data.length == 0) {
+    return "";
+  }
+  var out = "<ul>";
+  $.each(data, function(key, value) {
+    out += "<li>" + htmlEncode(value.message) + "</li>";
+  });
+  out += "</ul>";
+  return out;
+}
+
+function htmlEncode(s)
+{
+  return $("<div/>").text(s).html().replace(/"/g, """).replace(/'/g, "'");
+}
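
htmlEncode() uses jQuery's .text()/.html() round trip to escape &, < and >, and the two chained replace() calls additionally escape the quote characters so the result is safe inside attribute values as well. A rough example of the intended behavior:

  htmlEncode('<b class="x">');
  // -> '&lt;b class=&quot;x&quot;&gt;'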
+
+function fix_auth_of_cluster() {
+  show_loading_screen();
+  var clustername = Pcs.clusterController.cur_cluster.name;
+  $.ajax({
+    url: "/remote/fix_auth_of_cluster",
+    type: "POST",
+    data: "clustername=" + clustername,
+    success: function(data) {
+      hide_loading_screen();
+      Pcs.update();
+    },
+    error: function(jqhxr,b,c) {
+      hide_loading_screen();
+      Pcs.update();
+      alert(jqhxr.responseText);
+    }
+  });
+}
+
+function get_tree_view_element_id(element) {
+  return $(element).parents('table.tree-element')[0].id;
+}
+
+function get_list_view_element_id(element) {
+  return $(element)[0].id;
+}
+
+function auto_show_hide_constraints() {
+  var cont = ["location_constraints", "ordering_constraints", "ordering_set_constraints", "colocation_constraints", "meta_attributes"];
+  $.each(cont, function(index, name) {
+    var elem = $("#" + name)[0];
+    var cur_resource = Pcs.resourcesContainer.get('cur_resource');
+    if (elem && cur_resource) {
+      var visible = $(elem).children("span")[0].style.display != 'none';
+      if (visible && (!cur_resource.get(name) || cur_resource.get(name).length == 0))
+        show_hide_constraints(elem);
+      else if (!visible && cur_resource.get(name) && cur_resource.get(name).length > 0)
+        show_hide_constraints(elem);
+    }
+  });
+}
+
+function tree_view_onclick(resource_id, auto) {
+  auto = typeof auto !== 'undefined' ? auto : false;
+  var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
+  if (!resource_obj) {
+    console.log("Resource " + resource_id + "not found.");
+    return;
+  }
+  if (resource_obj.get('stonith')) {
+    Pcs.resourcesContainer.set('cur_fence', resource_obj);
+    if (!auto) window.location.hash = "/fencedevices/" + resource_id;
+  } else {
+    Pcs.resourcesContainer.set('cur_resource', resource_obj);
+    if (!auto) window.location.hash = "/resources/" + resource_id;
+    auto_show_hide_constraints();
+  }
+
+  tree_view_select(resource_id);
+
+  Ember.run.next(Pcs, function() {
+    load_agent_form(resource_id, resource_obj.get('stonith'));
+  });
+}
+
+function tree_view_select(element_id) {
+  var e = $('#' + element_id);
+  var view = e.parents('table.tree-view');
+  view.find('div.arrow').hide();
+  view.find('tr.children').hide();
+  view.find('table.tree-element').show();
+  view.find('tr.tree-element-name').removeClass("node_selected");
+  e.find('tr.tree-element-name:first').addClass("node_selected");
+  e.find('tr.tree-element-name div.arrow:first').show();
+  e.parents('tr.children').show();
+  e.find('tr.children').show();
+}
+
+function list_view_select(element_id) {
+  var e = $('#' + element_id);
+  var view = e.parents('table.list-view');
+  view.find('div.arrow').hide();
+  view.find('tr.list-view-element').removeClass("node_selected");
+  e.addClass('node_selected');
+  e.find('div.arrow').show();
+}
+
+function tree_view_checkbox_onchange(element) {
+  var e = $(element);
+  var children = $(element).closest(".tree-element").find(".children" +
+    " input:checkbox");
+  var val = e.prop('checked');
+  children.prop('checked', val);
+  children.prop('disabled', val);
+}
+
+function resource_master(resource_id) {
+  show_loading_screen();
+  $.ajax({
+    type: 'POST',
+    url: get_cluster_remote_url() + 'resource_master',
+    data: {resource_id: resource_id},
+    timeout: pcs_timeout,
+    error: function (xhr, status, error) {
+      alert(
+        "Unable to create master/slave resource "
+        + ajax_simple_error(xhr, status, error)
+      );
+    },
+    complete: function() {
+      Pcs.update();
+    }
+  });
+}
+
+function resource_clone(resource_id) {
+  show_loading_screen();
+  $.ajax({
+    type: 'POST',
+    url: get_cluster_remote_url() + 'resource_clone',
+    data: {resource_id: resource_id},
+    timeout: pcs_timeout,
+    error: function (xhr, status, error) {
+      alert(
+        "Unable to clone the resource "
+        + ajax_simple_error(xhr, status, error)
+      );
+    },
+    complete: function() {
+      Pcs.update();
+    }
+  });
+}
+
+function resource_unclone(resource_id) {
+  show_loading_screen();
+  var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
+  if (resource_obj.get('class_type') == 'clone') {
+    resource_id = resource_obj.get('member').get('id');
+  }
+  $.ajax({
+    type: 'POST',
+    url: get_cluster_remote_url() + 'resource_unclone',
+    data: {resource_id: resource_id},
+    timeout: pcs_timeout,
+    error: function (xhr, status, error) {
+      alert(
+        "Unable to unclone the resource "
+        + ajax_simple_error(xhr, status, error)
+      );
+    },
+    complete: function() {
+      Pcs.update();
+    }
+  });
+}
+
+function resource_ungroup(group_id) {
+  show_loading_screen();
+  $.ajax({
+    type: 'POST',
+    url: get_cluster_remote_url() + 'resource_ungroup',
+    data: {group_id: group_id},
+    timeout: pcs_timeout,
+    error: function (xhr, status, error) {
+      alert(
+        "Unable to ungroup the resource "
+        + ajax_simple_error(xhr, status, error)
+      );
+    },
+    complete: function() {
+      Pcs.update();
+    }
+  });
+}
+
+function resource_change_group(resource_id, group_id) {
+  show_loading_screen();
+  var resource_obj = Pcs.resourcesContainer.get_resource_by_id(resource_id);
+  var data = {
+    resource_id: resource_id,
+    group_id: group_id
+  };
+
+  if (resource_obj.get('parent')) {
+    if (resource_obj.get('parent').get('id') == group_id) {
+      return;
+    }
+    if (resource_obj.get('parent').get('class_type') == 'group') {
+      data['old_group_id'] = resource_obj.get('parent').get('id');
+    }
+  }
+
+  $.ajax({
+    type: 'POST',
+    url: get_cluster_remote_url() + 'resource_change_group',
+    data: data,
+    timeout: pcs_timeout,
+    error: function (xhr, status, error) {
+      alert(
+        "Unable to change group "
+        + ajax_simple_error(xhr, status, error)
+      );
+    },
+    complete: function() {
+      Pcs.update();
+    }
+  });
+}
+
+function ajax_simple_error(xhr, status, error) {
+  var message = "(" + $.trim(error) + ")"
+  if (
+    $.trim(xhr.responseText).length > 0
+    &&
+    xhr.responseText.indexOf('cib_replace failed') == -1
+  ) {
+    message = message + "\n\n" + $.trim(xhr.responseText);
+  }
+  return message;
+}
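
ajax_simple_error() centralizes the error text used by the alerts above: the transport error always appears in parentheses, and the server's response body is appended unless it is empty or contains a low-level "cib_replace failed" message. A typical caller then reduces to a sketch like this ("some_action" is a placeholder endpoint, not a real one):

  $.ajax({
    type: 'POST',
    url: get_cluster_remote_url() + 'some_action',
    timeout: pcs_timeout,
    error: function (xhr, status, error) {
      alert("Unable to perform some_action " + ajax_simple_error(xhr, status, error));
    },
    complete: function() { Pcs.update(); }
  });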
+
+var permissions_current_cluster;
+
+function permissions_load_all() {
+  show_loading_screen();
+
+  var cluster_list = [];
+  $("#node_info div[id^='permissions_cluster_']").each(function(i, div) {
+    cluster_list.push(
+      $(div).attr("id").substring("permissions_cluster_".length)
+    );
+  });
+
+  var call_count = cluster_list.length;
+  var callback = function() {
+    call_count = call_count - 1;
+    if (call_count < 1) {
+      hide_loading_screen();
+    }
+  }
+
+  $.each(cluster_list, function(index, cluster) {
+    permissions_load_cluster(cluster, callback);
+  });
+
+  if (cluster_list.length > 0) {
+    permissions_current_cluster = cluster_list[0];
+    permissions_show_cluster(
+      permissions_current_cluster,
+      $("#cluster_list tr").first().next() /* the first row is a heading */
+    );
+  }
+  else {
+    hide_loading_screen();
+  }
+}
+
+function permissions_load_cluster(cluster_name, callback) {
+  var element_id = "permissions_cluster_" + cluster_name;
+  $.ajax({
+    type: "GET",
+    url: "/permissions_cluster_form/" + cluster_name,
+    timeout: pcs_timeout,
+    success: function(data) {
+      $("#" + element_id).html(data);
+      $("#" + element_id + " :checkbox").each(function(key, checkbox) {
+        permissions_fix_dependent_checkboxes(checkbox);
+      });
+      permissions_cluster_dirty_flag(cluster_name, false);
+      if (callback) {
+        callback();
+      }
+    },
+    error: function(xhr, status, error) {
+      $("#" + element_id).html(
+        "Error loading permissions " + ajax_simple_error(xhr, status, error)
+      );
+      if (callback) {
+        callback();
+      }
+    }
+  });
+}
+
+function permissions_show_cluster(cluster_name, list_row) {
+  permissions_current_cluster = cluster_name;
+
+  var container = $("#node_info");
+  container.fadeTo(500, .01, function() {
+    container.children().hide();
+    $("#permissions_cluster_" + cluster_name).show();
+    container.fadeTo(500, 1);
+  });
+
+  $(list_row).siblings("tr").each(function(index, row) {
+    hover_out(row);
+    $(row).find("td").last().children().hide();
+  });
+  hover_over(list_row);
+  $(list_row).find("td").last().children().show();
+}
+
+function permissions_save_cluster(form) {
+  var dataString = $(form).serialize();
+  var cluster_name = permissions_get_clustername(form);
+  $.ajax({
+    type: "POST",
+    url: "/permissions_save/",
+    timeout: pcs_timeout,
+    data: dataString,
+    success: function() {
+      show_loading_screen();
+      permissions_load_cluster(cluster_name, hide_loading_screen);
+    },
+    error: function(xhr, status, error) {
+      alert(
+        "Unable to save permissions of cluster " + cluster_name + " "
+        + ajax_simple_error(xhr, status, error)
+      );
+    }
+  });
+}
+
+function permissions_cluster_dirty_flag(cluster_name, flag) {
+  var cluster_row = permissions_get_cluster_row(cluster_name);
+  if (cluster_row) {
+    var dirty_elem = cluster_row.find("span[class=unsaved_changes]");
+    if (dirty_elem) {
+      if (flag) {
+        dirty_elem.show();
+      }
+      else {
+        dirty_elem.hide();
+      }
+    }
+  }
+}
+
+function permission_remove_row(button) {
+  var cluster_name = permissions_get_clustername(
+    $(button).parents("form").first()
+  );
+  $(button).parent().parent().remove();
+  permissions_cluster_dirty_flag(cluster_name, true);
+}
+
+function permissions_add_row(template_row) {
+  var user_name = permissions_get_row_name(template_row);
+  var user_type = permissions_get_row_type(template_row);
+  var max_key = -1;
+  var exists = false;
+  var cluster_name = permissions_get_clustername(
+    $(template_row).parents("form").first()
+  );
+
+  if("" == user_name) {
+    alert("Please enter the name");
+    return;
+  }
+  if("" == user_type) {
+    alert("Please enter the type");
+    return;
+  }
+
+  $(template_row).siblings().each(function(index, row) {
+    if(
+      (permissions_get_row_name(row) == user_name)
+      &&
+      (permissions_get_row_type(row) == user_type)
+    ) {
+      exists = true;
+    }
+    $(row).find("input").each(function(index, input) {
+      var match = input.name.match(/^[^[]*\[(\d+)\].*$/);
+      if (match) {
+        var key = parseInt(match[1]);
+        if(key > max_key) {
+          max_key = key;
+        }
+      }
+    });
+  });
+  if(exists) {
+    alert("Permissions already set for the user");
+    return;
+  }
+
+  max_key = max_key + 1;
+  var new_row = $(template_row).clone();
+  new_row.find("[name*='_new']").each(function(index, element) {
+    element.name = element.name.replace("_new", "[" + max_key + "]");
+  });
+  new_row.find("td").last().html(
+    '<a class="remove" href="#" onclick="permission_remove_row(this);">X</a>'
+  );
+  new_row.find("[name$='[name]']").each(function(index, element) {
+    $(element).after(user_name);
+    $(element).attr("type", "hidden");
+  });
+  new_row.find("[name$='[type]']").each(function(index, element) {
+    $(element).after(user_type);
+    $(element).after(
+      '<input type="hidden" name="' + element.name  + '" value="' + user_type + '">'
+    );
+    $(element).remove();
+  });
+
+  $(template_row).before(new_row);
+  var template_inputs = $(template_row).find(":input");
+  template_inputs.removeAttr("checked").removeAttr("selected");
+  template_inputs.removeAttr("disabled").removeAttr("readonly");
+  $(template_row).find(":input[type=text]").val("");
+
+  permissions_cluster_dirty_flag(cluster_name, true);
+}
+
+function permissions_get_dependent_checkboxes(checkbox) {
+  var cluster_name = permissions_get_clustername(
+    $(checkbox).parents("form").first()
+  );
+  var checkbox_permission = permissions_get_checkbox_permission(checkbox);
+  var deps = {};
+  var dependent_permissions = [];
+  var dependent_checkboxes = [];
+
+  if (permissions_dependencies[cluster_name]) {
+    deps = permissions_dependencies[cluster_name];
+    if (deps["also_allows"] && deps["also_allows"][checkbox_permission]) {
+      dependent_permissions = deps["also_allows"][checkbox_permission];
+      $(checkbox).parents("tr").first().find(":checkbox").not(checkbox).each(
+        function(key, check) {
+          var perm = permissions_get_checkbox_permission(check);
+          if (dependent_permissions.indexOf(perm) != -1) {
+            dependent_checkboxes.push(check);
+          }
+        }
+      );
+    }
+  }
+  return dependent_checkboxes;
+}
+
+function permissions_fix_dependent_checkboxes(checkbox) {
+  var dep_checks = $(permissions_get_dependent_checkboxes(checkbox));
+  if ($(checkbox).prop("checked")) {
+    /* the checkbox is now checked */
+    dep_checks.each(function(key, check) {
+      var jq_check = $(check);
+      jq_check.prop("checked", true);
+      jq_check.prop("readonly", true);
+      // readonly on checkbox makes it look like readonly but doesn't prevent
+      // changing its state (checked - not checked), setting disabled works
+      jq_check.prop("disabled", true);
+      permissions_fix_dependent_checkboxes(check);
+    });
+  }
+  else {
+    /* the checkbox is now empty */
+    dep_checks.each(function(key, check) {
+      var jq_check = $(check);
+      jq_check.prop("checked", jq_check.prop("defaultChecked"));
+      jq_check.prop("readonly", false);
+      jq_check.prop("disabled", false);
+      permissions_fix_dependent_checkboxes(check);
+    });
+  }
+}
+
+function permissions_get_row_name(row) {
+  return $.trim($(row).find("[name$='[name]']").val());
+}
+
+function permissions_get_row_type(row) {
+  return $.trim($(row).find("[name$='[type]']").val());
+}
+
+function permissions_get_clustername(form) {
+  return $.trim($(form).find("[name=cluster_name]").val());
+}
+
+function permissions_get_checkbox_permission(checkbox) {
+  var match = checkbox.name.match(/^.*\[([^[]+)\]$/);
+  if (match) {
+    return match[1];
+  }
+  return "";
+}
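
permissions_get_checkbox_permission() extracts the permission code from the last bracketed segment of the checkbox's name attribute; anything without such a suffix yields an empty string. The form's exact naming scheme is not visible in this hunk, so the name below is purely illustrative:

  // Hypothetical name attribute, for illustration only.
  var checkbox = { name: "permissions[2][allow][read]" };
  permissions_get_checkbox_permission(checkbox); // -> "read"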
+
+function permissions_get_cluster_row(cluster_name) {
+  var cluster_row = null;
+  $('#cluster_list td[class=node_name]').each(function(index, elem) {
+    var jq_elem = $(elem);
+    if (jq_elem.text().trim() == cluster_name.trim()) {
+      cluster_row = jq_elem.parents("tr").first();
+    }
+  });
+  return cluster_row;
+}
+
+function is_cib_true(value) {
+  if (value) {
+    return (['true', 'on', 'yes', 'y', '1'].indexOf(value.toString().toLowerCase()) != -1);
+  }
+  return false;
+}
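
is_cib_true() accepts the truthy spellings commonly used for boolean values in the CIB ("true", "on", "yes", "y", "1", case-insensitive) and treats everything else, including null, as false. For example:

  is_cib_true("Yes");   // true
  is_cib_true(1);       // true  ("1" after toString())
  is_cib_true("false"); // false
  is_cib_true(null);    // false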
+
+function set_utilization(type, entity_id, name, value) {
+  var data = {
+    name: name,
+    value: value
+  };
+  if (type == "node") {
+    data["node"] = entity_id;
+  } else if (type == "resource") {
+    data["resource_id"] = entity_id;
+  } else return false;
+  var url = get_cluster_remote_url() + "set_" + type + "_utilization";
+
+  $.ajax({
+    type: 'POST',
+    url: url,
+    data: data,
+    timeout: pcs_timeout,
+    error: function (xhr, status, error) {
+      alert(
+        "Unable to set utilization: "
+        + ajax_simple_error(xhr, status, error)
+      );
+    },
+    complete: function() {
+      Pcs.update();
+    }
+  });
+}
+
+function is_integer(str) {
+  if (Number(str) === str && str % 1 === 0) // the argument is already a number, not a string
+    return true;
+  var n = ~~Number(str);
+  return String(n) === str;
+}
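
is_integer() accepts real numbers as well as strings that are exact decimal representations of an integer; the double bitwise NOT (~~) truncates toward zero, so anything with a fractional part or stray characters fails the final string comparison. Roughly:

  is_integer(5);     // true  (already a number, 5 % 1 === 0)
  is_integer("42");  // true  (String(~~42) === "42")
  is_integer("4.2"); // false (String(4) !== "4.2")
  is_integer("abc"); // false (String(0) !== "abc")

Since ~~ operates on 32-bit integers, very large numeric strings would also be rejected.
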
diff --git a/pcsd/remote.rb b/pcsd/remote.rb
index 05f346d..4b8505b 100644
--- a/pcsd/remote.rb
+++ b/pcsd/remote.rb
@@ -1,153 +1,193 @@
 require 'json'
 require 'uri'
+require 'open4'
+require 'set'
+require 'timeout'
+
 require 'pcs.rb'
 require 'resource.rb'
-require 'open4'
+require 'config.rb'
+require 'cfgsync.rb'
+require 'cluster_entity.rb'
+require 'permissions.rb'
+require 'auth.rb'
 
 # Commands for remote access
-def remote(params,request)
-  pacemaker_status = pacemaker_running?
-
-  case (params[:command])
-  when "status"
-    return node_status(params)
-  when "status_all"
-    return status_all(params)
-  when "auth"
-    return auth(params,request)
-  when "check_auth"
-    return check_auth(params, request)
-  when "setup_cluster"
-    return setup_cluster(params)
-  when "create_cluster"
-    return create_cluster(params)
-  when "get_quorum_info"
-    return get_quorum_info(params)
-  when "get_cib"
-    return get_cib(params)
-  when "get_corosync_conf"
-    return get_corosync_conf(params)
-  when "set_cluster_conf"
-    if set_cluster_conf(params)
-      return "Updated cluster.conf..."
+def remote(params, request, session)
+  remote_cmd_without_pacemaker = {
+      :status => method(:node_status),
+      :status_all => method(:status_all),
+      :cluster_status => method(:cluster_status_remote),
+      :auth => method(:auth),
+      :check_auth => method(:check_auth),
+      :fix_auth_of_cluster => method(:fix_auth_of_cluster),
+      :setup_cluster => method(:setup_cluster),
+      :create_cluster => method(:create_cluster),
+      :get_quorum_info => method(:get_quorum_info),
+      :get_cib => method(:get_cib),
+      :get_corosync_conf => method(:get_corosync_conf_remote),
+      :set_cluster_conf => method(:set_cluster_conf),
+      :set_corosync_conf => method(:set_corosync_conf),
+      :get_sync_capabilities => method(:get_sync_capabilities),
+      :set_sync_options => method(:set_sync_options),
+      :get_configs => method(:get_configs),
+      :set_configs => method(:set_configs),
+      :set_certs => method(:set_certs),
+      :pcsd_restart => method(:remote_pcsd_restart),
+      :get_permissions => method(:get_permissions_remote),
+      :set_permissions => method(:set_permissions_remote),
+      :cluster_start => method(:cluster_start),
+      :cluster_stop => method(:cluster_stop),
+      :config_backup => method(:config_backup),
+      :config_restore => method(:config_restore),
+      :node_restart => method(:node_restart),
+      :node_standby => method(:node_standby),
+      :node_unstandby => method(:node_unstandby),
+      :cluster_enable => method(:cluster_enable),
+      :cluster_disable => method(:cluster_disable),
+      :resource_status => method(:resource_status),
+      :check_gui_status => method(:check_gui_status),
+      :get_sw_versions => method(:get_sw_versions),
+      :node_available => method(:remote_node_available),
+      :add_node_all => lambda { |params_, request_, session_|
+        remote_add_node(params_, request_, session_, true)
+      },
+      :add_node => lambda { |params_, request_, session_|
+        remote_add_node(params_, request_, session_, false)
+      },
+      :remove_nodes => method(:remote_remove_nodes),
+      :remove_node => method(:remote_remove_node),
+      :cluster_destroy => method(:cluster_destroy),
+      :get_wizard => method(:get_wizard),
+      :wizard_submit => method(:wizard_submit),
+      :auth_gui_against_nodes => method(:auth_gui_against_nodes),
+      :get_tokens => method(:get_tokens),
+      :get_cluster_tokens => method(:get_cluster_tokens),
+      :save_tokens => method(:save_tokens),
+      :add_node_to_cluster => method(:add_node_to_cluster),
+  }
+  remote_cmd_with_pacemaker = {
+      :resource_start => method(:resource_start),
+      :resource_stop => method(:resource_stop),
+      :resource_cleanup => method(:resource_cleanup),
+      :resource_form => method(:resource_form),
+      :fence_device_form => method(:fence_device_form),
+      :update_resource => method(:update_resource),
+      :update_fence_device => method(:update_fence_device),
+      :resource_metadata => method(:resource_metadata),
+      :fence_device_metadata => method(:fence_device_metadata),
+      :get_avail_resource_agents => method(:get_avail_resource_agents),
+      :get_avail_fence_agents => method(:get_avail_fence_agents),
+      :remove_resource => method(:remove_resource),
+      :add_constraint_remote => method(:add_constraint_remote),
+      :add_constraint_rule_remote => method(:add_constraint_rule_remote),
+      :add_constraint_set_remote => method(:add_constraint_set_remote),
+      :remove_constraint_remote => method(:remove_constraint_remote),
+      :remove_constraint_rule_remote => method(:remove_constraint_rule_remote),
+      :add_meta_attr_remote => method(:add_meta_attr_remote),
+      :add_group => method(:add_group),
+      :update_cluster_settings => method(:update_cluster_settings),
+      :add_fence_level_remote => method(:add_fence_level_remote),
+      :add_node_attr_remote => method(:add_node_attr_remote),
+      :add_acl_role => method(:add_acl_role_remote),
+      :remove_acl_roles => method(:remove_acl_roles_remote),
+      :add_acl => method(:add_acl_remote),
+      :remove_acl => method(:remove_acl_remote),
+      :resource_change_group => method(:resource_change_group),
+      :resource_master => method(:resource_master),
+      :resource_clone => method(:resource_clone),
+      :resource_unclone => method(:resource_unclone),
+      :resource_ungroup => method(:resource_ungroup),
+      :set_resource_utilization => method(:set_resource_utilization),
+      :set_node_utilization => method(:set_node_utilization)
+  }
+
+  command = params[:command].to_sym
+
+  if remote_cmd_without_pacemaker.include? command
+    return remote_cmd_without_pacemaker[command].call(params, request, session)
+  elsif remote_cmd_with_pacemaker.include? command
+    if pacemaker_running?
+      return remote_cmd_with_pacemaker[command].call(params, request, session)
     else
-      return "Failed to update cluster.conf..."
+      return [200,'{"pacemaker_not_running":true}']
     end
-  when "set_corosync_conf"
-    if set_corosync_conf(params)
-      return "Succeeded"
-    else
-      return "Failed"
-    end
-  when "cluster_start"
-    return cluster_start(params)
-  when "cluster_stop"
-    return cluster_stop(params)
-  when "config_backup"
-    return config_backup(params)
-  when "config_restore"
-    return config_restore(params)
-  when "node_restart"
-    return node_restart(params)
-  when "node_standby"
-    return node_standby(params)
-  when "node_unstandby"
-    return node_unstandby(params)
-  when "cluster_enable"
-    return cluster_enable(params)
-  when "cluster_disable"
-    return cluster_disable(params)
-  when "resource_status"
-    return resource_status(params)
-  when "check_gui_status"
-    return check_gui_status(params)
-  when "get_sw_versions"
-    return get_sw_versions(params)
-  when "node_available"
-    return remote_node_available(params)
-  when "add_node_all"
-    return remote_add_node(params,true)
-  when "add_node"
-    return remote_add_node(params,false)
-  when "remove_nodes"
-    return remote_remove_nodes(params)
-  when "remove_node"
-    return remote_remove_node(params)
-  when "cluster_destroy"
-    return cluster_destroy(params)
-  when "get_wizard"
-    return get_wizard(params)
-  when "wizard_submit"
-    return wizard_submit(params)
-  end
-
-  if not pacemaker_status
-    return [200,'{"pacemaker_not_running":true}']
-  end
-  # Anything below this line will not be run if pacemaker is not started
-
-  case (params[:command])
-  when "resource_start"
-    return resource_start(params)
-  when "resource_stop"
-    return resource_stop(params)
-  when "resource_cleanup"
-    return resource_cleanup(params)
-  when "resource_form"
-    return resource_form(params)
-  when "fence_device_form"
-    return fence_device_form(params)
-  when "update_resource"
-    return update_resource(params)
-  when "update_fence_device"
-    return update_fence_device(params)
-  when "resource_metadata"
-    return resource_metadata(params)
-  when "fence_device_metadata"
-    return fence_device_metadata(params)
-  when "get_avail_resource_agents"
-    return get_avail_resource_agents(params)
-  when "get_avail_fence_agents"
-    return get_avail_fence_agents(params)
-  when "remove_resource"
-    return remove_resource(params)
-  when "add_constraint_remote"
-    return add_constraint_remote(params)
-  when "add_constraint_rule_remote"
-    return add_constraint_rule_remote(params)
-  when "add_constraint_set_remote"
-    return add_constraint_set_remote(params)
-  when "remove_constraint_remote"
-    return remove_constraint_remote(params)
-  when "remove_constraint_rule_remote"
-    return remove_constraint_rule_remote(params)
-  when "add_meta_attr_remote"
-    return add_meta_attr_remote(params)
-  when "add_group"
-    return add_group(params)
-  when "update_cluster_settings"
-    return update_cluster_settings(params)
-  when "add_fence_level_remote"
-    return add_fence_level_remote(params)
-  when "add_node_attr_remote"
-    return add_node_attr_remote(params)
-  when "add_acl_role"
-    return add_acl_role_remote(params)
-  when "remove_acl_roles"
-    return remove_acl_roles_remote(params)
-  when "add_acl"
-    return add_acl_remote(params)
-  when "remove_acl"
-    return remove_acl_remote(params)
   else
     return [404, "Unknown Request"]
   end
 end
 
-def cluster_start(params)
+# provides remote cluster status to a local gui
+def cluster_status_gui(session, cluster_name, dont_update_config=false)
+  cluster_nodes = get_cluster_nodes(cluster_name)
+  status = cluster_status_from_nodes(session, cluster_nodes, cluster_name)
+  unless status
+    return 403, 'Permission denied'
+  end
+
+  new_cluster_nodes = []
+  new_cluster_nodes += status[:corosync_offline] if status[:corosync_offline]
+  new_cluster_nodes += status[:corosync_online] if status[:corosync_online]
+  new_cluster_nodes += status[:pacemaker_offline] if status[:pacemaker_offline]
+  new_cluster_nodes += status[:pacemaker_online] if status[:pacemaker_online]
+  new_cluster_nodes.uniq!
+
+  if new_cluster_nodes.length > 0
+    config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    if !(dont_update_config or config.cluster_nodes_equal?(cluster_name, new_cluster_nodes))
+      old_cluster_nodes = config.get_nodes(cluster_name)
+      $logger.info("Updating node list for: #{cluster_name} #{old_cluster_nodes}->#{new_cluster_nodes}")
+      config.update_cluster(cluster_name, new_cluster_nodes)
+      sync_config = Cfgsync::PcsdSettings.from_text(config.text())
+      # on version conflict just go on, config will be corrected eventually
+      # by displaying the cluster in the web UI
+      Cfgsync::save_sync_new_version(
+          sync_config, get_corosync_nodes(), $cluster_name, true
+      )
+      return cluster_status_gui(session, cluster_name, true)
+    end
+  end
+  return JSON.generate(status)
+end
+
+# get cluster status and return it to a remote gui or other client
+def cluster_status_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
+
+  cluster_name = $cluster_name
+  # If node is not in a cluster, return empty data
+  if not cluster_name or cluster_name.empty?
+    overview = {
+      :cluster_name => nil,
+      :error_list => [],
+      :warning_list => [],
+      :quorate => nil,
+      :status => 'unknown',
+      :node_list => [],
+      :resource_list => [],
+    }
+    return JSON.generate(overview)
+  end
+
+  cluster_nodes = get_nodes().flatten
+  status = cluster_status_from_nodes(session, cluster_nodes, cluster_name)
+  unless status
+    return 403, 'Permission denied'
+  end
+  return JSON.generate(status)
+end
+
+def cluster_start(params, request, session)
   if params[:name]
-    code, response = send_request_with_token(params[:name], 'cluster_start', true)
+    code, response = send_request_with_token(
+      session, params[:name], 'cluster_start', true
+    )
   else
+    if not allowed_for_local_cluster(session, Permissions::WRITE)
+      return 403, 'Permission denied'
+    end
     $logger.info "Starting Daemons"
     output =  `#{PCS} cluster start`
     $logger.debug output
@@ -155,15 +195,18 @@ def cluster_start(params)
   end
 end
 
-def cluster_stop(params)
+def cluster_stop(params, request, session)
   if params[:name]
     params_without_name = params.reject {|key, value|
       key == "name" or key == :name
     }
     code, response = send_request_with_token(
-      params[:name], 'cluster_stop', true, params_without_name
+      session, params[:name], 'cluster_stop', true, params_without_name
     )
   else
+    if not allowed_for_local_cluster(session, Permissions::WRITE)
+      return 403, 'Permission denied'
+    end
     options = []
     if params.has_key?("component")
       if params["component"].downcase == "pacemaker"
@@ -174,7 +217,7 @@ def cluster_stop(params)
     end
     options << "--force" if params["force"]
     $logger.info "Stopping Daemons"
-    stdout, stderr, retval = run_cmd(PCS, "cluster", "stop", *options)
+    stdout, stderr, retval = run_cmd(session, PCS, "cluster", "stop", *options)
     if retval != 0
       return [400, stderr.join]
     else
@@ -183,14 +226,17 @@ def cluster_stop(params)
   end
 end
 
-def config_backup(params)
+def config_backup(params, request, session)
   if params[:name]
     code, response = send_request_with_token(
-        params[:name], 'config_backup', true
+      session, params[:name], 'config_backup', true
     )
   else
+    if not allowed_for_local_cluster(session, Permissions::FULL)
+      return 403, 'Permission denied'
+    end
     $logger.info "Backup node configuration"
-    stdout, stderr, retval = run_cmd(PCS, "config", "backup")
+    stdout, stderr, retval = run_cmd(session, PCS, "config", "backup")
     if retval == 0
         $logger.info "Backup successful"
         return [200, stdout]
@@ -200,12 +246,16 @@ def config_backup(params)
   end
 end
 
-def config_restore(params)
+def config_restore(params, request, session)
   if params[:name]
     code, response = send_request_with_token(
-        params[:name], 'config_restore', true, {:tarball => params[:tarball]}
+      session, params[:name], 'config_restore', true,
+      {:tarball => params[:tarball]}
     )
   else
+    if not allowed_for_local_cluster(session, Permissions::FULL)
+      return 403, 'Permission denied'
+    end
     $logger.info "Restore node configuration"
     if params[:tarball] != nil and params[:tarball] != ""
       out = ""
@@ -231,10 +281,15 @@ def config_restore(params)
   end
 end
 
-def node_restart(params)
+def node_restart(params, request, session)
   if params[:name]
-    code, response = send_request_with_token(params[:name], 'node_restart', true)
+    code, response = send_request_with_token(
+      session, params[:name], 'node_restart', true
+    )
   else
+    if not allowed_for_local_cluster(session, Permissions::WRITE)
+      return 403, 'Permission denied'
+    end
     $logger.info "Restarting Node"
     output =  `/sbin/reboot`
     $logger.debug output
@@ -242,33 +297,48 @@ def node_restart(params)
   end
 end
 
-def node_standby(params)
+def node_standby(params, request, session)
   if params[:name]
-    code, response = send_request_with_token(params[:name], 'node_standby', true, {"node"=>params[:name]})
+    code, response = send_request_with_token(
+      session, params[:name], 'node_standby', true, {"node"=>params[:name]}
+    )
     # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd
   else
+    if not allowed_for_local_cluster(session, Permissions::WRITE)
+      return 403, 'Permission denied'
+    end
     $logger.info "Standby Node"
-    stdout, stderr, retval = run_cmd(PCS,"cluster","standby")
+    stdout, stderr, retval = run_cmd(session, PCS, "cluster", "standby")
     return stdout
   end
 end
 
-def node_unstandby(params)
+def node_unstandby(params, request, session)
   if params[:name]
-    code, response = send_request_with_token(params[:name], 'node_unstandby', true, {"node"=>params[:name]})
+    code, response = send_request_with_token(
+      session, params[:name], 'node_unstandby', true, {"node"=>params[:name]}
+    )
     # data={"node"=>params[:name]} for backward compatibility with older versions of pcs/pcsd
   else
+    if not allowed_for_local_cluster(session, Permissions::WRITE)
+      return 403, 'Permission denied'
+    end
     $logger.info "Unstandby Node"
-    stdout, stderr, retval = run_cmd(PCS,"cluster","unstandby")
+    stdout, stderr, retval = run_cmd(session, PCS, "cluster", "unstandby")
     return stdout
   end
 end
 
-def cluster_enable(params)
+def cluster_enable(params, request, session)
   if params[:name]
-    code, response = send_request_with_token(params[:name], 'cluster_enable', true)
+    code, response = send_request_with_token(
+      session, params[:name], 'cluster_enable', true
+    )
   else
-    success = enable_cluster()
+    if not allowed_for_local_cluster(session, Permissions::WRITE)
+      return 403, 'Permission denied'
+    end
+    success = enable_cluster(session)
     if not success
       return JSON.generate({"error" => "true"})
     end
@@ -276,11 +346,16 @@ def cluster_enable(params)
   end
 end
 
-def cluster_disable(params)
+def cluster_disable(params, request, session)
   if params[:name]
-    code, response = send_request_with_token(params[:name], 'cluster_disable', true)
+    code, response = send_request_with_token(
+      session, params[:name], 'cluster_disable', true
+    )
   else
-    success = disable_cluster()
+    if not allowed_for_local_cluster(session, Permissions::WRITE)
+      return 403, 'Permission denied'
+    end
+    success = disable_cluster(session)
     if not success
       return JSON.generate({"error" => "true"})
     end
@@ -288,10 +363,16 @@ def cluster_disable(params)
   end
 end
 
-def get_quorum_info(params)
+def get_quorum_info(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
   if ISRHEL6
-    stdout_status, stderr_status, retval = run_cmd(CMAN_TOOL, "status")
+    stdout_status, stderr_status, retval = run_cmd(
+      PCSAuth.getSuperuserSession, CMAN_TOOL, "status"
+    )
     stdout_nodes, stderr_nodes, retval = run_cmd(
+      PCSAuth.getSuperuserSession,
       CMAN_TOOL, "nodes", "-F", "id,type,votes,name"
     )
     if stderr_status.length > 0
@@ -302,7 +383,9 @@ def get_quorum_info(params)
       return stdout_status.join + "\n---Votes---\n" + stdout_nodes.join
     end
   else
-    stdout, stderr, retval = run_cmd(COROSYNC_QUORUMTOOL, "-p", "-s")
+    stdout, stderr, retval = run_cmd(
+      PCSAuth.getSuperuserSession, COROSYNC_QUORUMTOOL, "-p", "-s"
+    )
     # retval is 0 on success if node is not in partition with quorum
     # retval is 1 on error OR on success if node has quorum
     if stderr.length > 0
@@ -313,82 +396,369 @@ def get_quorum_info(params)
   end
 end
 
-def get_cib(params)
-  cib, stderr, retval = run_cmd(CIBADMIN, "-Ql")
+def get_cib(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
+  cib, stderr, retval = run_cmd(session, CIBADMIN, "-Ql")
   if retval != 0
-    return [400, "Unable to get CIB: " + cib.to_s + stderr.to_s]
+    if not pacemaker_running?
+      return [400, '{"pacemaker_not_running":true}']
+    end
+    return [500, "Unable to get CIB: " + cib.to_s + stderr.to_s]
   else
     return [200, cib]
   end
 end
 
-def get_corosync_conf(params)
-  if ISRHEL6
-    f = File.open("/etc/cluster/cluster.conf",'r')
+def get_corosync_conf_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
+  return get_corosync_conf()
+end
+
+def set_cluster_conf(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, 'Permission denied'
+  end
+  if params[:cluster_conf] != nil and params[:cluster_conf].strip != ""
+    Cfgsync::ClusterConf.backup()
+    Cfgsync::ClusterConf.from_text(params[:cluster_conf]).save()
+    return 200, 'Updated cluster.conf...'
   else
-    f = File.open("/etc/corosync/corosync.conf",'r')
+    $logger.info "Invalid cluster.conf file"
+    return 400, 'Failed to update cluster.conf...'
   end
-  return f.read
 end
 
-def set_cluster_conf(params)
-  if params[:cluster_conf] != nil and params[:cluster_conf] != ""
-    begin
-      FileUtils.cp(CLUSTER_CONF, CLUSTER_CONF + "." + Time.now.to_i.to_s)
-    rescue => e
-      $logger.debug "Exception trying to backup cluster.conf: " + e.inspect.to_s
+def set_corosync_conf(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, 'Permission denied'
+  end
+  if params[:corosync_conf] != nil and params[:corosync_conf].strip != ""
+    Cfgsync::CorosyncConf.backup()
+    Cfgsync::CorosyncConf.from_text(params[:corosync_conf]).save()
+    return 200, "Succeeded"
+  else
+    $logger.info "Invalid corosync.conf file"
+    return 400, "Failed"
+  end
+end
+
+def get_sync_capabilities(params, request, session)
+  return JSON.generate({
+    'syncable_configs' => Cfgsync::get_cfg_classes_by_name().keys,
+  })
+end
+
+def set_sync_options(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, 'Permission denied'
+  end
+
+  options = [
+    'sync_thread_pause', 'sync_thread_resume',
+    'sync_thread_disable', 'sync_thread_enable',
+  ]
+  if params.keys.count { |key| options.include?(key) } != 1
+    return [400, 'Exactly one option has to be specified']
+  end
+
+  if params['sync_thread_disable']
+    if Cfgsync::ConfigSyncControl.sync_thread_disable($semaphore_cfgsync)
+      return 'sync thread disabled'
+    else
+      return [400, 'sync thread disable error']
+    end
+  end
+
+  if params['sync_thread_enable']
+    if Cfgsync::ConfigSyncControl.sync_thread_enable()
+      return 'sync thread enabled'
+    else
+      return [400, 'sync thread enable error']
+    end
+  end
+
+  if params['sync_thread_resume']
+    if Cfgsync::ConfigSyncControl.sync_thread_resume()
+      return 'sync thread resumed'
+    else
+      return [400, 'sync thread resume error']
+    end
+  end
+
+  if params['sync_thread_pause']
+    if Cfgsync::ConfigSyncControl.sync_thread_pause(
+        $semaphore_cfgsync, params['sync_thread_pause']
+      )
+      return 'sync thread paused'
+    else
+      return [400, 'sync thread pause error']
     end
-    File.open("/etc/cluster/cluster.conf",'w') {|f|
-      f.write(params[:cluster_conf])
+  end
+
+  return [400, 'Exactly one option has to be specified']
+end
+
+def get_configs(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, 'Permission denied'
+  end
+  if not $cluster_name or $cluster_name.empty?
+    return JSON.generate({'status' => 'not_in_cluster'})
+  end
+  if params[:cluster_name] != $cluster_name
+    return JSON.generate({'status' => 'wrong_cluster_name'})
+  end
+  out = {
+    'status' => 'ok',
+    'cluster_name' => $cluster_name,
+    'configs' => {},
+  }
+  Cfgsync::get_configs_local.each { |name, cfg|
+    out['configs'][cfg.class.name] = {
+      'type' => 'file',
+      'text' => cfg.text,
     }
-    return true
-  else
-    $logger.info "Invalid cluster.conf file"
-    return false
+  }
+  return JSON.generate(out)
+end
+
+def set_configs(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, 'Permission denied'
+  end
+  return JSON.generate({'status' => 'bad_json'}) if not params['configs']
+  begin
+    configs_json = JSON.parse(params['configs'])
+  rescue JSON::ParserError
+    return JSON.generate({'status' => 'bad_json'})
+  end
+  has_cluster = !($cluster_name == nil or $cluster_name.empty?)
+  if has_cluster and $cluster_name != configs_json['cluster_name']
+    return JSON.generate({'status' => 'wrong_cluster_name'})
   end
+
+  $semaphore_cfgsync.synchronize {
+    force = configs_json['force']
+    remote_configs, unknown_cfg_names = Cfgsync::sync_msg_to_configs(configs_json)
+    local_configs = Cfgsync::get_configs_local
+
+    result = {}
+    unknown_cfg_names.each { |name| result[name] = 'not_supported' }
+    remote_configs.each { |name, remote_cfg|
+      begin
+        # Save a remote config if it is a newer version than local. If the config
+        # is not present on a local node, the node is being added to a cluster,
+        # so we need to save the config as well.
+        if force or not local_configs.key?(name) or remote_cfg > local_configs[name]
+          local_configs[name].class.backup() if local_configs.key?(name)
+          remote_cfg.save()
+          result[name] = 'accepted'
+        elsif remote_cfg == local_configs[name]
+          # Someone wants this node to have a config that it already has.
+          # The desired state is already met, so report success.
+          result[name] = 'accepted'
+        else
+          result[name] = 'rejected'
+        end
+      rescue => e
+        $logger.error("Error saving config '#{name}': #{e}")
+        result[name] = 'error'
+      end
+    }
+    return JSON.generate({'status' => 'ok', 'result' => result})
+  }
 end
 
+def set_certs(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, 'Permission denied'
+  end
 
-def set_corosync_conf(params)
-  if params[:corosync_conf] != nil and params[:corosync_conf] != ""
+  ssl_cert = (params['ssl_cert'] || '').strip
+  ssl_key = (params['ssl_key'] || '').strip
+  if ssl_cert.empty? and !ssl_key.empty?
+    return [400, 'cannot save ssl certificate without ssl key']
+  end
+  if !ssl_cert.empty? and ssl_key.empty?
+    return [400, 'cannot save ssl key without ssl certificate']
+  end
+  if !ssl_cert.empty? and !ssl_key.empty?
+    ssl_errors = verify_cert_key_pair(ssl_cert, ssl_key)
+    if ssl_errors and !ssl_errors.empty?
+      return [400, ssl_errors.join]
+    end
     begin
-      FileUtils.cp(COROSYNC_CONF,COROSYNC_CONF + "." + Time.now.to_i.to_s)
-    rescue
+      write_file_lock(CRT_FILE, 0700, ssl_cert)
+      write_file_lock(KEY_FILE, 0700, ssl_key)
+    rescue => e
+      # clean the files if we ended in the middle
+      # the files will be regenerated on next pcsd start
+      FileUtils.rm(CRT_FILE, {:force => true})
+      FileUtils.rm(KEY_FILE, {:force => true})
+      return [400, "cannot save ssl files: #{e}"]
+    end
+  end
+
+  if params['cookie_secret']
+    cookie_secret = params['cookie_secret'].strip
+    if !cookie_secret.empty?
+      begin
+        write_file_lock(COOKIE_FILE, 0700, cookie_secret)
+      rescue => e
+        return [400, "cannot save cookie secret: #{e}"]
+      end
+    end
+  end
+
+  return [200, 'success']
+end
+
+def get_permissions_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::GRANT)
+    return 403, 'Permission denied'
+  end
+
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  data = {
+    'user_types' => Permissions::get_user_types(),
+    'permission_types' => Permissions::get_permission_types(),
+    'permissions_dependencies' => Permissions::permissions_dependencies(),
+    'users_permissions' => pcs_config.permissions_local.to_hash(),
+  }
+  return [200, JSON.generate(data)]
+end
+
+def set_permissions_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::GRANT)
+    return 403, 'Permission denied'
+  end
+
+  begin
+    data = JSON.parse(params['json_data'])
+  rescue JSON::ParserError
+    return 400, JSON.generate({'status' => 'bad_json'})
+  end
+
+  user_set = {}
+  perm_list = []
+  full_users_new = Set.new
+  perm_deps = Permissions.permissions_dependencies
+  if data['permissions']
+    data['permissions'].each { |key, perm|
+      name = (perm['name'] || '').strip
+      type = (perm['type'] || '').strip
+      return [400, 'Missing user name'] if '' == name
+      return [400, 'Missing user type'] if '' == type
+      if not Permissions::is_user_type(type)
+        return [400, "Unknown user type '#{type}'"]
+      end
+
+      if user_set.key?([name, type])
+        return [400, "Duplicate permissions for #{type} #{name}"]
+      end
+      user_set[[name, type]] = true
+
+      allow = []
+      if perm['allow']
+        perm['allow'].each { |perm_allow, enabled|
+          next if "1" != enabled
+          if not Permissions::is_permission_type(perm_allow)
+            return [400, "Unknown permission '#{perm_allow}'"]
+          end
+          if Permissions::FULL == perm_allow
+            full_users_new << [type, name]
+          end
+          allow << perm_allow
+          # Explicitly save dependent permissions. That way if the dependency is
+          # changed in the future it won't revoke permissions which were once
+          # granted.
+          if perm_deps['also_allows'] and perm_deps['also_allows'][perm_allow]
+            allow += perm_deps['also_allows'][perm_allow]
+          end
+        }
+      end
+
+      perm_list << Permissions::EntityPermissions.new(type, name, allow.uniq())
+    }
+  end
+  perm_set = Permissions::PermissionsSet.new(perm_list)
+
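+  # granting or revoking the Full permission is only allowed to users who have Full permission themselves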
+  full_users_old = Set.new
+  pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  pcs_config.permissions_local.entity_permissions_list.each{ |entity_perm|
+    if entity_perm.allow_list.include?(Permissions::FULL)
+      full_users_old << [entity_perm.type, entity_perm.name]
     end
-    File.open("/etc/corosync/corosync.conf",'w') {|f|
-      f.write(params[:corosync_conf])
+  }
+
+  if full_users_new != full_users_old
+    label = 'Full'
+    Permissions.get_permission_types.each { |perm_type|
+      if Permissions::FULL == perm_type['code']
+        label = perm_type['label']
+        break
+      end
     }
-    return true
-  else
-    $logger.info "Invalid corosync.conf file"
-    return false
+    if not allowed_for_local_cluster(session, Permissions::FULL)
+      return [
+        403,
+        "Permission denied\nOnly #{SUPERUSER} and users with #{label} "\
+          + "permission can grant or revoke #{label} permission."
+      ]
+    end
   end
+
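+  # the config is re-read and pushed again once if the first sync fails (e.g. on a version conflict)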
+  2.times {
+    pcs_config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+    pcs_config.permissions_local = perm_set
+    sync_config = Cfgsync::PcsdSettings.from_text(pcs_config.text())
+    pushed, _ = Cfgsync::save_sync_new_version(
+      sync_config, get_corosync_nodes(), $cluster_name, true
+    )
+    return [200, 'Permissions saved'] if pushed
+  }
+  return 400, 'Unable to save permissions'
 end
 
-def check_gui_status(params)
+def remote_pcsd_restart(params, request, session)
+  pcsd_restart()
+  return [200, 'success']
+end
+
+def check_gui_status(params, request, session)
   node_results = {}
   if params[:nodes] != nil and params[:nodes] != ""
     node_array = params[:nodes].split(",")
-    stdout, stderr, retval = run_cmd(PCS, "cluster", "pcsd-status", *node_array)
-    if retval == 0
-      stdout.each { |l|
-        l = l.chomp
-        out = l.split(/: /)
-        node_results[out[0].strip] = out[1]
-      }
-    end
+    online, offline, notauthorized = check_gui_status_of_nodes(
+      session, node_array
+    )
+    online.each { |node|
+      node_results[node] = "Online"
+    }
+    offline.each { |node|
+      node_results[node] = "Offline"
+    }
+    notauthorized.each { |node|
+      node_results[node] = "Unable to authenticate"
+    }
   end
   return JSON.generate(node_results)
 end
 
-def get_sw_versions(params)
+def get_sw_versions(params, request, session)
   if params[:nodes] != nil and params[:nodes] != ""
     nodes = params[:nodes].split(",")
     final_response = {}
     threads = []
     nodes.each {|node|
       threads << Thread.new {
-        code, response = send_request_with_token(node, 'get_sw_versions')
+        code, response = send_request_with_token(
+          session, node, 'get_sw_versions'
+        )
         begin
           node_response = JSON.parse(response)
           if node_response and node_response["notoken"] == true
@@ -412,14 +782,17 @@ def get_sw_versions(params)
   return JSON.generate(versions)
 end
 
-def remote_node_available(params)
-  if (not ISRHEL6 and File.exist?(COROSYNC_CONF)) or (ISRHEL6 and File.exist?(CLUSTER_CONF)) or File.exist?("/var/lib/pacemaker/cib/cib.xml")
+def remote_node_available(params, request, session)
+  if (not ISRHEL6 and File.exist?(Cfgsync::CorosyncConf.file_path)) or (ISRHEL6 and File.exist?(Cfgsync::ClusterConf.file_path)) or File.exist?("/var/lib/pacemaker/cib/cib.xml")
     return JSON.generate({:node_available => false})
   end
   return JSON.generate({:node_available => true})
 end
 
-def remote_add_node(params,all = false)
+def remote_add_node(params, request, session, all=false)
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, 'Permission denied'
+  end
   auto_start = false
   if params[:auto_start] and params[:auto_start] == "1"
     auto_start = true
@@ -430,56 +803,78 @@ def remote_add_node(params,all = false)
     if params[:new_ring1addr] != nil
       node += ',' + params[:new_ring1addr]
     end
-    retval, output = add_node(node, all, auto_start)
+    retval, output = add_node(session, node, all, auto_start)
   end
 
   if retval == 0
-    return [200,JSON.generate([retval,get_corosync_conf([])])]
+    return [200, JSON.generate([retval, get_corosync_conf()])]
   end
 
   return [400,output]
 end
 
-def remote_remove_nodes(params)
+def remote_remove_nodes(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, 'Permission denied'
+  end
   count = 0
   out = ""
   node_list = []
+  options = []
   while params["nodename-" + count.to_s]
     node_list << params["nodename-" + count.to_s]
     count = count + 1
   end
+  options << "--force" if params["force"]
 
   cur_node = get_current_node_name()
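+  # move the local node to the end of the list so it is removed last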
   if i = node_list.index(cur_node)
     node_list.push(node_list.delete_at(i))
   end
 
+  # stop the nodes at once in order to:
+  # - prevent resources from moving pointlessly
+  # - get a possible quorum loss warning
+  stop_params = node_list + options
+  stdout, stderr, retval = run_cmd(
+    session, PCS, "cluster", "stop", *stop_params
+  )
+  if retval != 0
+    return [400, stderr.join]
+  end
+
   node_list.each {|node|
-    retval, output = remove_node(node,true)
+    retval, output = remove_node(session, node, true)
     out = out + output.join("\n")
   }
-  config = PCSConfig.new
+  config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
   if config.get_nodes($cluster_name) == nil or config.get_nodes($cluster_name).length == 0
     return [200,"No More Nodes"]
   end
   return out
 end
 
-def remote_remove_node(params)
+def remote_remove_node(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, 'Permission denied'
+  end
   if params[:remove_nodename] != nil
-    retval, output = remove_node(params[:remove_nodename])
+    retval, output = remove_node(session, params[:remove_nodename])
   else
-    return 404, "No nodename specified"
+    return 400, "No nodename specified"
   end
 
   if retval == 0
-    return JSON.generate([retval,get_corosync_conf([])])
+    return JSON.generate([retval, get_corosync_conf()])
   end
 
   return JSON.generate([retval,output])
 end
 
-def setup_cluster(params)
+def setup_cluster(params, request, session)
+  if not allowed_for_superuser(session)
+    return 403, 'Permission denied'
+  end
   $logger.info("Setting up cluster: " + params.inspect)
   nodes_rrp = params[:nodes].split(';')
   options = []
@@ -525,130 +920,162 @@ def setup_cluster(params)
   end
   nodes_options = nodes + options
   nodes_options += options_udp if transport_udp
-  stdout, stderr, retval = run_cmd(PCS, "cluster", "setup", "--enable", "--start", "--name",params[:clustername], *nodes_options)
+  stdout, stderr, retval = run_cmd(
+    session, PCS, "cluster", "setup", "--enable", "--start",
+    "--name", params[:clustername], *nodes_options
+  )
   if retval != 0
-    return [400, stdout.join("\n") + stderr.join("\n")]
+    return [
+      400,
+      (stdout + [''] + stderr).collect { |line| line.rstrip() }.join("\n")
+    ]
   end
   return 200
 end
 
-def create_cluster(params)
-  if set_corosync_conf(params)
-    cluster_start()
+def create_cluster(params, request, session)
+  if not allowed_for_superuser(session)
+    return 403, 'Permission denied'
+  end
+  if set_corosync_conf(params, request, session)
+    cluster_start(params, request, session)
   else
     return "Failed"
   end
 end
 
-def node_status(params)
-  if params[:node] != nil and params[:node] != "" and params[:node] != $cur_node_name
-    return send_request_with_token(params[:node],"status?hello=1")
+def node_status(params, request, session)
+  if params[:node] and params[:node] != '' and params[:node] !=
+    $cur_node_name and !params[:redirected]
+    return send_request_with_token(
+      session,
+      params[:node],
+      'status?redirected=1',
+      false,
+      params.select { |k,_|
+        [:version, :operations].include?(k)
+      }
+    )
   end
 
-  uptime = `cat /proc/uptime`.chomp.split(' ')[0].split('.')[0].to_i
-  mm, ss = uptime.divmod(60)
-  hh, mm = mm.divmod(60)
-  dd, hh = hh.divmod(24)
-  uptime = "%d days, %02d:%02d:%02d" % [dd, hh, mm, ss]
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
 
-  corosync_status = corosync_running?
-  corosync_enabled = corosync_enabled?
-  pacemaker_status = pacemaker_running?
-  pacemaker_enabled = pacemaker_enabled?
-  cman_status = cman_running?
-  pcsd_enabled = pcsd_enabled?
+  cib_dom = get_cib_dom(session)
+  crm_dom = get_crm_mon_dom(session)
 
-  corosync_online = []
-  corosync_offline = []
-  pacemaker_online = []
-  pacemaker_offline = []
-  pacemaker_standby = []
-  in_pacemaker = false
-  stdout, stderr, retval = run_cmd(PCS,"status","nodes","both")
-  stdout.each {|l|
-    l = l.chomp
-    if l.start_with?("Pacemaker Nodes:")
-      in_pacemaker = true
-    end
-    if l.end_with?(":")
-      next
-    end
+  status = get_node_status(session, cib_dom)
+  resources = get_resources(
+    cib_dom,
+    crm_dom,
+    (params[:operations] and params[:operations] == '1')
+  )
 
-    title,nodes = l.split(/: /,2)
-    if nodes == nil
-      next
-    end
+  node = ClusterEntity::Node.load_current_node(session, crm_dom)
 
-    if title == " Online"
-      in_pacemaker ? pacemaker_online.concat(nodes.split(/ /)) : corosync_online.concat(nodes.split(/ /))
-    elsif title == " Standby"
-      if in_pacemaker
-      	pacemaker_standby.concat(nodes.split(/ /))
+  _,_,not_authorized_nodes = check_gui_status_of_nodes(
+    session,
+    status[:known_nodes],
+    false,
+    3
+  )
+
+  if not_authorized_nodes.length > 0
+    node.warning_list << {
+      :message => 'Not authorized against node(s) ' + 
+        not_authorized_nodes.join(', '),
+      :type => 'nodes_not_authorized',
+      :node_list => not_authorized_nodes,
+    }
+  end
+
+  version = params[:version] || '1'
+
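+  # status format version 2 returns structured node and resource objects,
+  # version 1 (the default) rebuilds the legacy flat hash below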
+  if version == '2'
+    status[:node] = node.to_status(version)
+    resource_list = nil
+    if resources
+      resource_list = []
+      resources.each do |r|
+        resource_list << r.to_status(version)
       end
-    else
-      in_pacemaker ? pacemaker_offline.concat(nodes.split(/ /)) : corosync_offline.concat(nodes.split(/ /))
     end
-  }
-  node_id = get_local_node_id()
-  resource_list, group_list = getResourcesGroups(false,true)
-  stonith_resource_list, stonith_group_list = getResourcesGroups(true,true)
-  stonith_resource_list.each {|sr| sr.stonith = true}
-  resource_list = resource_list + stonith_resource_list
-  acls = get_acls()
-  out_rl = []
-  resource_list.each {|r|
-    out_nodes = []
-    oConstraints = []
-    meta_attributes = []
-    r.meta_attr.each_pair {|k,v| meta_attributes << {:key => k, :value => v[1], :id => v[0], :parent => v[2]}}
-    r.nodes.each{|n|
-      out_nodes.push(n.name)
+
+    status[:resource_list] = resource_list
+
+    return JSON.generate(status)
+  end
+
+  resource_list = []
+  resources.each do |r|
+    resource_list.concat(r.to_status('1'))
+  end
+
+  cluster_settings = (status[:cluster_settings].empty?) ?
+    {'error' => 'Unable to get configuration settings'} :
+    status[:cluster_settings]
+
+  node_attr = {}
+  status[:node_attr].each { |node, attrs|
+    node_attr[node] = []
+    attrs.each { |attr|
+      node_attr[node] << {
+        :key => attr[:name],
+        :value => attr[:value]
+      }
     }
-    out_rl.push({:id => r.id, :agentname => r.agentname, :active => r.active,
-                 :nodes => out_nodes, :group => r.group, :clone => r.clone,
-                 :clone_id => r.clone_id, :ms_id => r.ms_id,
-                 :failed => r.failed, :orphaned => r.orphaned, :options => r.options,
-                 :stonith => r.stonith, :ms => r.ms, :disabled => r.disabled,
-                 :operations => r.operations, :instance_attr => r.instance_attr,
-                 :meta_attr => meta_attributes})
   }
-  constraints = getAllConstraints()
-  cluster_settings = getAllSettings()
-  node_attributes = get_node_attributes()
-  fence_levels = get_fence_levels()
-  status = {"uptime" => uptime, "corosync" => corosync_status, "pacemaker" => pacemaker_status,
-            "cman" => cman_status,
-            "corosync_enabled" => corosync_enabled, "pacemaker_enabled" => pacemaker_enabled,
-            "pcsd_enabled" => pcsd_enabled,
-            "corosync_online" => corosync_online, "corosync_offline" => corosync_offline,
-            "pacemaker_online" => pacemaker_online, "pacemaker_offline" => pacemaker_offline,
-            "pacemaker_standby" => pacemaker_standby,
-            "cluster_name" => $cluster_name, "resources" => out_rl, "groups" => group_list,
-            "constraints" => constraints, "cluster_settings" => cluster_settings, "node_id" => node_id,
-            "node_attr" => node_attributes, "fence_levels" => fence_levels,
-            "need_ring1_address" => need_ring1_address?,
-            "is_cman_with_udpu_transport" => is_cman_with_udpu_transport?,
-            "acls" => acls, "username" => cookies[:CIB_user]
-           }
-  ret = JSON.generate(status)
-  return ret
-end
-
-def status_all(params, nodes = [])
+
+  old_status = {
+    :uptime => node.uptime,
+    :corosync => node.corosync,
+    :pacemaker => node.pacemaker,
+    :cman => node.cman,
+    :corosync_enabled => node.corosync_enabled,
+    :pacemaker_enabled => node.pacemaker_enabled,
+    :pcsd_enabled => node.pcsd_enabled,
+    :corosync_online => status[:corosync_online],
+    :corosync_offline => status[:corosync_offline],
+    :pacemaker_online => status[:pacemaker_online],
+    :pacemaker_offline => status[:pacemaker_offline],
+    :pacemaker_standby => status[:pacemaker_standby],
+    :cluster_name => status[:cluster_name],
+    :resources => resource_list,
+    :groups => status[:groups],
+    :constraints => status[:constraints],
+    :cluster_settings => cluster_settings,
+    :node_id => node.id,
+    :node_attr => node_attr,
+    :fence_levels => status[:fence_levels],
+    :need_ring1_address => status[:need_ring1_address],
+    :is_cman_with_udpu_transport => status[:is_cman_with_udpu_transport],
+    :acls => status[:acls],
+    :username => status[:username]
+  }
+
+  return JSON.generate(old_status)
+end
+
+def status_all(params, request, session, nodes=[], dont_update_config=false)
   if nodes == nil
     return JSON.generate({"error" => "true"})
   end
 
   final_response = {}
   threads = []
+  forbidden_nodes = {}
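+  # query all nodes in parallel; a 403 from any node fails the whole request with 403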
   nodes.each {|node|
     threads << Thread.new {
-      code, response = send_request_with_token(node, 'status')
+      code, response = send_request_with_token(session, node, 'status')
+      if 403 == code
+        forbidden_nodes[node] = true
+      end
       begin
-	final_response[node] = JSON.parse(response)
+        final_response[node] = JSON.parse(response)
       rescue JSON::ParserError => e
-	final_response[node] = {"bad_json" => true}
-	$logger.info("ERROR: Parse Error when parsing status JSON from #{node}")
+        final_response[node] = {"bad_json" => true}
+        $logger.info("ERROR: Parse Error when parsing status JSON from #{node}")
       end
       if final_response[node] and final_response[node]["notoken"] == true
         $logger.error("ERROR: bad token for #{node}")
@@ -656,6 +1083,9 @@ def status_all(params, nodes = [])
     }
   }
   threads.each { |t| t.join }
+  if forbidden_nodes.length > 0
+    return 403, 'Permission denied'
+  end
 
   # Get full list of nodes and see if we need to update the configuration
   node_list = []
@@ -668,81 +1098,243 @@ def status_all(params, nodes = [])
 
   node_list.uniq!
   if node_list.length > 0
-    config = PCSConfig.new
+    config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
     old_node_list = config.get_nodes(params[:cluster])
-    if old_node_list & node_list != old_node_list or old_node_list.size!=node_list.size
-      $logger.info("Updating node list for: " + params[:cluster] + " " + old_node_list.inspect + "->" + node_list.inspect)
-      config.update(params[:cluster], node_list)
-      return status_all(params, node_list)
+    if !(dont_update_config or config.cluster_nodes_equal?(params[:cluster], node_list))
+      $logger.info("Updating node list for: #{params[:cluster]} #{old_node_list}->#{node_list}")
+      config.update_cluster(params[:cluster], node_list)
+      sync_config = Cfgsync::PcsdSettings.from_text(config.text())
+      # on version conflict just go on, config will be corrected eventually
+      # by displaying the cluster in the web UI
+      Cfgsync::save_sync_new_version(
+        sync_config, get_corosync_nodes(), $cluster_name, true
+      )
+      return status_all(params, request, session, node_list, true)
     end
   end
   $logger.debug("NODE LIST: " + node_list.inspect)
   return JSON.generate(final_response)
 end
 
-def auth(params,request)
-  token = PCSAuth.validUser(params['username'],params['password'], true, request)
+def clusters_overview(params, request, session)
+  cluster_map = {}
+  forbidden_clusters = {}
+  threads = []
+  config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
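+  # query each cluster in parallel; the placeholder entry marks a cluster as unreachable until one of its nodes responds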
+  config.clusters.each { |cluster|
+    threads << Thread.new {
+      cluster_map[cluster.name] = {
+        'cluster_name' => cluster.name,
+        'error_list' => [
+          {'message' => 'Unable to connect to the cluster. Request timeout.'}
+        ],
+        'warning_list' => [],
+        'status' => 'unknown',
+        'node_list' => get_default_overview_node_list(cluster.name),
+        'resource_list' => []
+      }
+      overview_cluster = nil
+      online, offline, not_authorized_nodes = check_gui_status_of_nodes(
+        session,
+        get_cluster_nodes(cluster.name),
+        false,
+        3
+      )
+      not_supported = false
+      forbidden = false
+      cluster_nodes_auth = (online + offline).uniq
+      cluster_nodes_all = (cluster_nodes_auth + not_authorized_nodes).uniq
+      nodes_not_in_cluster = []
+      for node in cluster_nodes_auth
+        code, response = send_request_with_token(
+          session, node, 'cluster_status', true, {}, true, nil, 8
+        )
+        if code == 404
+          not_supported = true
+          next
+        end
+        if 403 == code
+          forbidden = true
+          forbidden_clusters[cluster.name] = true
+          break
+        end
+        begin
+          parsed_response = JSON.parse(response)
+          if parsed_response['noresponse'] or parsed_response['pacemaker_not_running']
+            next
+          elsif parsed_response['notoken'] or parsed_response['notauthorized']
+            next
+          elsif parsed_response['cluster_name'] != cluster.name
+            # queried node is not in the cluster (any more)
+            nodes_not_in_cluster << node
+            next
+          else
+            overview_cluster = parsed_response
+            break
+          end
+        rescue JSON::ParserError
+        end
+      end
+
+      if cluster_nodes_all.sort == nodes_not_in_cluster.sort
+        overview_cluster = {
+          'cluster_name' => cluster.name,
+          'error_list' => [],
+          'warning_list' => [],
+          'status' => 'unknown',
+          'node_list' => [],
+          'resource_list' => []
+        }
+      end
+
+      if not overview_cluster
+        overview_cluster = {
+          'cluster_name' => cluster.name,
+          'error_list' => [],
+          'warning_list' => [],
+          'status' => 'unknown',
+          'node_list' => get_default_overview_node_list(cluster.name),
+          'resource_list' => []
+        }
+        if not_supported
+          overview_cluster['warning_list'] = [
+            {
+              'message' => 'Cluster is running an old version of pcs/pcsd which does not provide data for the dashboard.',
+            },
+          ]
+        else
+          if forbidden
+            overview_cluster['error_list'] = [
+              {
+                'message' => 'You do not have permissions to view the cluster.',
+                'type' => 'forbidden',
+              },
+            ]
+            overview_cluster['node_list'] = []
+          else
+            overview_cluster['error_list'] = [
+              {
+                'message' => 'Unable to connect to the cluster.',
+              },
+            ]
+          end
+        end
+      end
+      if not_authorized_nodes.length > 0
+        overview_cluster['warning_list'] << {
+          'message' => 'GUI is not authorized against node(s) '\
+            + not_authorized_nodes.join(', '),
+          'type' => 'nodes_not_authorized',
+          'node_list' => not_authorized_nodes,
+        }
+      end
+
+      overview_cluster['node_list'].each { |node|
+        if node['status_version'] == '1'
+          overview_cluster['warning_list'] << {
+            :message => 'Some nodes are running old version of pcs/pcsd.'
+          }
+          break
+        end
+      }
+
+      cluster_map[cluster.name] = overview_cluster
+    }
+  }
+
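+  # give the overview threads 18 seconds in total, then kill whatever is still waiting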
+  begin
+    Timeout::timeout(18) {
+      threads.each { |t| t.join }
+    }
+  rescue Timeout::Error
+    threads.each { |t| t.exit }
+  end
+
+  # update clusters in PCSConfig
+  not_current_data = false
+  config = PCSConfig.new(Cfgsync::PcsdSettings.from_file('{}').text())
+  cluster_map.each { |cluster, values|
+    next if forbidden_clusters[cluster]
+    nodes = []
+    values['node_list'].each { |node|
+      nodes << node['name']
+    }
+    if !config.cluster_nodes_equal?(cluster, nodes)
+      $logger.info("Updating node list for: #{cluster} #{config.get_nodes(cluster)}->#{nodes}")
+      config.update_cluster(cluster, nodes)
+      not_current_data = true
+    end
+  }
+  if not_current_data
+    sync_config = Cfgsync::PcsdSettings.from_text(config.text())
+    # on version conflict just go on, config will be corrected eventually
+    # by displaying the cluster in the web UI
+    Cfgsync::save_sync_new_version(
+      sync_config, get_corosync_nodes(), $cluster_name, true
+    )
+  end
+
+  overview = {
+    'not_current_data' => not_current_data,
+    'cluster_list' => cluster_map.values.sort { |a, b|
+      a['cluster_name'] <=> b['cluster_name']
+    }
+  }
+  return JSON.generate(overview)
+end
+
+def auth(params, request, session)
+  token = PCSAuth.validUser(params['username'],params['password'], true)
   # If we authorized to this machine, attempt to authorize everywhere
   node_list = []
   if token and params["bidirectional"]
     params.each { |k,v|
       if k.start_with?("node-")
-	node_list.push(v)
+        node_list.push(v)
       end
     }
     if node_list.length > 0
-      pcs_auth(node_list, params['username'], params['password'], params["force"] == "1")
+      pcs_auth(
+        session, node_list, params['username'], params['password'],
+        params["force"] == "1"
+      )
     end
   end
   return token
 end
 
-# We can't pass username/password on the command line for security purposes
-def pcs_auth(nodes, username, password, force=false)
-  command = [PCS, "cluster", "auth", "--local"] + nodes
-  command += ["--force"] if force
-  Open4::popen4(*command) {|pid, stdin, stdout, stderr|
-    begin
-      while line = stdout.readpartial(4096)
-	if line =~ /Username: \Z/
-	  stdin.write(username + "\n")
-	end
-
-	if line =~ /Password: \Z/
-	  stdin.write(password + "\n")
-	end
-      end
-    rescue EOFError
-    end
-  }
-end
-
 # If we get here, we're already authorized
-def check_auth(params, request)
-  retval, node_list = get_token_node_list()
+def check_auth(params, request, session)
+  if params.include?("check_auth_only")
+    return [200, "{\"success\":true}"]
+  end
   return JSON.generate({
-    'success' => retval == 0,
-    'node_list' => node_list.collect { |item| item.strip },
+    'success' => true,
+    'node_list' => get_token_node_list,
   })
 end
 
-def resource_status(params)
+# not used anymore, left here for backward compatibility reasons
+def resource_status(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
   resource_id = params[:resource]
-  @resources,@groups = getResourcesGroups
+  @resources,@groups = getResourcesGroups(session)
   location = ""
   res_status = ""
   @resources.each {|r|
     if r.id == resource_id
       if r.failed
-	res_status =  "Failed"
+        res_status =  "Failed"
       elsif !r.active
-	res_status = "Inactive"
+        res_status = "Inactive"
       else
-	res_status = "Running"
+        res_status = "Running"
       end
       if r.nodes.length != 0
-	location = r.nodes[0].name
-	break
+        location = r.nodes[0].name
+        break
       end
     end
   }
@@ -750,8 +1342,13 @@ def resource_status(params)
   return JSON.generate(status)
 end
 
-def resource_stop(params)
-  stdout, stderr, retval = run_cmd(PCS,"resource","disable", params[:resource])
+def resource_stop(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  stdout, stderr, retval = run_cmd(
+    session, PCS, "resource", "disable", params[:resource]
+  )
   if retval == 0
     return JSON.generate({"success" => "true"})
   else
@@ -759,8 +1356,13 @@ def resource_stop(params)
   end
 end
 
-def resource_cleanup(params)
-  stdout, stderr, retval = run_cmd(PCS,"resource","cleanup", params[:resource])
+def resource_cleanup(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  stdout, stderr, retval = run_cmd(
+    session, PCS, "resource", "cleanup", params[:resource]
+  )
   if retval == 0
     return JSON.generate({"success" => "true"})
   else
@@ -768,8 +1370,13 @@ def resource_cleanup(params)
   end
 end
 
-def resource_start(params)
-  stdout, stderr, retval = run_cmd(PCS,"resource","enable", params[:resource])
+def resource_start(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  stdout, stderr, retval = run_cmd(
+    session, PCS, "resource", "enable", params[:resource]
+  )
   if retval == 0
     return JSON.generate({"success" => "true"})
   else
@@ -777,21 +1384,29 @@ def resource_start(params)
   end
 end
 
-def resource_form(params)
-  @resources, @groups, retval = getResourcesGroups()
-  if retval != 0
-    return [200,'{"noresponse":true}']
-  end
-  @existing_resource = true
-  @resources.each do |r|
-    if r.id == params[:resource]
-      @cur_resource = r
+def resource_form(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
+
+  cib_dom = get_cib_dom(session)
+  @cur_resource = get_resource_by_id(params[:resource], cib_dom)
+  @groups = get_resource_groups(cib_dom)
+  @version = params[:version]
+
+  if @cur_resource.instance_of?(ClusterEntity::Primitive) and !@cur_resource.stonith
+    @cur_resource_group = @cur_resource.get_group
+    @cur_resource_clone = @cur_resource.get_clone
+    @cur_resource_ms = @cur_resource.get_master
+    @resource = ResourceAgent.new(@cur_resource.agentname)
+    if @cur_resource.provider == 'heartbeat'
+      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, HEARTBEAT_AGENTS_DIR + @cur_resource.type)
+    elsif @cur_resource.provider == 'pacemaker'
+      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, PACEMAKER_AGENTS_DIR + @cur_resource.type)
+    elsif @cur_resource._class == 'nagios'
+      @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, NAGIOS_METADATA_DIR + @cur_resource.type + '.xml')
     end
-  end
-  if @cur_resource
-    @cur_resource.options = getResourceOptions(@cur_resource.id)
-    @resource_agents = getResourceAgents(@cur_resource.agentname)
-    @resource = @resource_agents[@cur_resource.agentname.gsub('::',':')]
+    @existing_resource = true
     if @resource
       erb :resourceagentform
     else
@@ -802,25 +1417,17 @@ def resource_form(params)
   end
 end
 
-def fence_device_form(params)
-  @resources, @groups, retval = getResourcesGroups(true)
-  if retval != 0
-    return [200,'{"noresponse":true}']
+def fence_device_form(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
   end
 
-  @cur_resource = nil
-  @resources.each do |r|
-    if r.id == params[:resource]
-      @cur_resource = r
-      break
-    end
-  end
+  @cur_resource = get_resource_by_id(params[:resource], get_cib_dom(session))
 
-  if @cur_resource
-    @cur_resource.options = getResourceOptions(@cur_resource.id,true)
-    @resource_agents = getFenceAgents(@cur_resource.agentname)
+  if @cur_resource.instance_of?(ClusterEntity::Primitive) and @cur_resource.stonith
+    @resource_agents = getFenceAgents(session, @cur_resource.agentname)
     @existing_resource = true
-    @fenceagent = @resource_agents[@cur_resource.agentname.gsub(/.*:/,"")]
+    @fenceagent = @resource_agents[@cur_resource.type]
     erb :fenceagentform
   else
     "Can't find fence device"
@@ -828,26 +1435,35 @@ def fence_device_form(params)
 end
 
 # Creates resource if params[:resource_id] is not set
-def update_resource (params)
-  param_line = getParamLine(params)
+def update_resource (params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+
+  param_line = getParamList(params)
   if not params[:resource_id]
-    out, stderr, retval = run_cmd(PCS, "resource", "create", params[:name], params[:resource_type],
-	    *(param_line.split(" ")))
+    out, stderr, retval = run_cmd(
+      session,
+      PCS, "resource", "create", params[:name], params[:resource_type],
+      *param_line
+    )
     if retval != 0
       return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
     end
     if params[:resource_group] and params[:resource_group] != ""
-      run_cmd(PCS, "resource","group", "add", params[:resource_group],
-	      params[:name])
+      run_cmd(
+        session,
+        PCS, "resource","group", "add", params[:resource_group], params[:name]
+      )
       resource_group = params[:resource_group]
     end
 
     if params[:resource_clone] and params[:resource_clone] != ""
       name = resource_group ? resource_group : params[:name]
-      run_cmd(PCS, "resource", "clone", name)
+      run_cmd(session, PCS, "resource", "clone", name)
     elsif params[:resource_ms] and params[:resource_ms] != ""
       name = resource_group ? resource_group : params[:name]
-      run_cmd(PCS, "resource", "master", name)
+      run_cmd(session, PCS, "resource", "master", name)
     end
 
     return JSON.generate({})
@@ -858,46 +1474,64 @@ def update_resource (params)
     if params[:resource_clone]
       params[:resource_id].sub!(/(.*):.*/,'\1')
     end
-    run_cmd(PCS, "resource", "update", params[:resource_id], *(param_line.split(" ")))
+    run_cmd(
+      session, PCS, "resource", "update", params[:resource_id], *param_line
+    )
   end
 
   if params[:resource_group]
     if params[:resource_group] == ""
       if params[:_orig_resource_group] != ""
-	run_cmd(PCS, "resource", "group", "remove", params[:_orig_resource_group], params[:resource_id])
+        run_cmd(
+          session, PCS, "resource", "group", "remove",
+          params[:_orig_resource_group], params[:resource_id]
+        )
       end
     else
-      run_cmd(PCS, "resource", "group", "add", params[:resource_group], params[:resource_id])
+      run_cmd(
+        session, PCS, "resource", "group", "add", params[:resource_group],
+        params[:resource_id]
+      )
     end
   end
 
   if params[:resource_clone] and params[:_orig_resource_clone] == "false"
-    run_cmd(PCS, "resource", "clone", params[:resource_id])
+    run_cmd(session, PCS, "resource", "clone", params[:resource_id])
   end
   if params[:resource_ms] and params[:_orig_resource_ms] == "false"
-    run_cmd(PCS, "resource", "master", params[:resource_id] + "-master", params[:resource_id])
+    run_cmd(session, PCS, "resource", "master", params[:resource_id])
   end
 
   if params[:_orig_resource_clone] == "true" and not params[:resource_clone]
-    run_cmd(PCS, "resource", "unclone", params[:resource_id].sub(/:.*/,''))
+    run_cmd(
+      session, PCS, "resource", "unclone", params[:resource_id].sub(/:.*/,'')
+    )
   end
   if params[:_orig_resource_ms] == "true" and not params[:resource_ms]
-    run_cmd(PCS, "resource", "unclone", params[:resource_id].sub(/:.*/,''))
+    run_cmd(
+      session, PCS, "resource", "unclone", params[:resource_id].sub(/:.*/,'')
+    )
   end
 
   return JSON.generate({})
 end
 
-def update_fence_device (params)
-  logger.info "Updating fence device"
-  logger.info params
-  param_line = getParamLine(params)
-  logger.info param_line
+def update_fence_device(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+
+  $logger.info "Updating fence device"
+  $logger.info params
+  param_line = getParamList(params)
+  $logger.info param_line
 
-  param_line = getParamLine(params)
   if not params[:resource_id]
-    out, stderr, retval = run_cmd(PCS, "stonith", "create", params[:name], params[:resource_type],
-	    *(param_line.split(" ")))
+    out, stderr, retval = run_cmd(
+      session,
+      PCS, "stonith", "create", params[:name], params[:resource_type],
+      *param_line
+    )
     if retval != 0
       return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
     end
@@ -905,7 +1539,9 @@ def update_fence_device (params)
   end
 
   if param_line.length != 0
-    out, stderr, retval = run_cmd(PCS, "stonith", "update", params[:resource_id], *(param_line.split(" ")))
+    out, stderr, retval = run_cmd(
+      session, PCS, "stonith", "update", params[:resource_id], *param_line
+    )
     if retval != 0
       return JSON.generate({"error" => "true", "stderr" => stderr, "stdout" => out})
     end
@@ -913,80 +1549,119 @@ def update_fence_device (params)
   return "{}"
 end
 
-def get_avail_resource_agents (params)
-  agents = getResourceAgents()
+def get_avail_resource_agents(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
+  agents = getResourceAgents(session)
   return JSON.generate(agents)
 end
 
-def get_avail_fence_agents(params)
-  agents = getFenceAgents()
+def get_avail_fence_agents(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
+  agents = getFenceAgents(session)
   return JSON.generate(agents)
 end
 
-def resource_metadata (params)
+def resource_metadata(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
   return 200 if not params[:resourcename] or params[:resourcename] == ""
   resource_name = params[:resourcename][params[:resourcename].rindex(':')+1..-1]
   class_provider = params[:resourcename][0,params[:resourcename].rindex(':')]
 
   @resource = ResourceAgent.new(params[:resourcename])
   if class_provider == "ocf:heartbeat"
-    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(HEARTBEAT_AGENTS_DIR + resource_name)
+    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, HEARTBEAT_AGENTS_DIR + resource_name)
   elsif class_provider == "ocf:pacemaker"
-    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(PACEMAKER_AGENTS_DIR + resource_name)
+    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, PACEMAKER_AGENTS_DIR + resource_name)
+  elsif class_provider == 'nagios'
+    @resource.required_options, @resource.optional_options, @resource.info = getResourceMetadata(session, NAGIOS_METADATA_DIR + resource_name + '.xml')
   end
   @new_resource = params[:new]
-  @resources, @groups = getResourcesGroups
-  
+  @resources, @groups = getResourcesGroups(session)
+
   erb :resourceagentform
 end
 
-def fence_device_metadata (params)
+def fence_device_metadata(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
   return 200 if not params[:resourcename] or params[:resourcename] == ""
   @fenceagent = FenceAgent.new(params[:resourcename])
-  @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(params[:resourcename])
+  @fenceagent.required_options, @fenceagent.optional_options, @fenceagent.advanced_options, @fenceagent.info = getFenceAgentMetadata(session, params[:resourcename])
   @new_fenceagent = params[:new]
   
   erb :fenceagentform
 end
 
-def remove_resource (params)
+def remove_resource(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  force = params['force']
+  no_error_if_not_exists = params.include?('no_error_if_not_exists')
   errors = ""
   params.each { |k,v|
     if k.index("resid-") == 0
-      out, errout, retval = run_cmd(PCS, "--force", "resource", "delete", k.gsub("resid-",""))
+      resid = k.gsub('resid-', '')
+      command = [PCS, 'resource', 'delete', resid]
+      command << '--force' if force
+      out, errout, retval = run_cmd(session, *command)
       if retval != 0
-	errors += "Unable to remove: " + k.gsub("resid-","") + "\n"
+        unless out.index(" does not exist.") != -1 and no_error_if_not_exists  
+          errors += errout.join(' ').strip + "\n"
+        end
       end
     end
   }
+  errors.strip!
   if errors == ""
     return 200
   else
-    logger.info("Remove resource errors:\n"+errors)
-    return [500, errors]
+    $logger.info("Remove resource errors:\n"+errors)
+    return [400, errors]
   end
 end
 
-def add_fence_level_remote(params)
-  retval = add_fence_level(params["level"], params["devices"], params["node"], params["remove"])
+def add_fence_level_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  retval, stdout, stderr = add_fence_level(
+    session, params["level"], params["devices"], params["node"], params["remove"]
+  )
   if retval == 0
     return [200, "Successfully added fence level"]
   else
-    return [400, "Error adding fence level"]
+    return [400, stderr]
   end
 end
 
-def add_node_attr_remote(params)
-  retval = add_node_attr(params["node"], params["key"], params["value"])
-  if retval == 0
+def add_node_attr_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  retval = add_node_attr(
+    session, params["node"], params["key"], params["value"]
+  )
+  # retval = 2 if removing attr which doesn't exist
+  if retval == 0 or retval == 2
     return [200, "Successfully added attribute to node"]
   else
     return [400, "Error adding attribute to node"]
   end
 end
 
-def add_acl_role_remote(params)
-  retval = add_acl_role(params["name"], params["description"])
+def add_acl_role_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::GRANT)
+    return 403, 'Permission denied'
+  end
+  retval = add_acl_role(session, params["name"], params["description"])
   if retval == ""
     return [200, "Successfully added ACL role"]
   else
@@ -997,12 +1672,15 @@ def add_acl_role_remote(params)
   end
 end
 
-def remove_acl_roles_remote(params)
+def remove_acl_roles_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::GRANT)
+    return 403, 'Permission denied'
+  end
   errors = ""
   params.each { |name, value|
     if name.index("role-") == 0
       out, errout, retval = run_cmd(
-        PCS, "acl", "role", "delete", value.to_s, "--autodelete"
+        session, PCS, "acl", "role", "delete", value.to_s, "--autodelete"
       )
       if retval != 0
         errors += "Unable to remove role #{value}"
@@ -1021,14 +1699,18 @@ def remove_acl_roles_remote(params)
   end
 end
 
-def add_acl_remote(params)
+def add_acl_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::GRANT)
+    return 403, 'Permission denied'
+  end
   if params["item"] == "permission"
     retval = add_acl_permission(
+      session,
       params["role_id"], params["type"], params["xpath_id"], params["query_id"]
     )
   elsif (params["item"] == "user") or (params["item"] == "group")
     retval = add_acl_usergroup(
-      params["role_id"], params["item"], params["usergroup"]
+      session, params["role_id"], params["item"], params["usergroup"]
     )
   else
     retval = "Error: Unknown adding request"
@@ -1044,11 +1726,16 @@ def add_acl_remote(params)
   end
 end
 
-def remove_acl_remote(params)
+def remove_acl_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::GRANT)
+    return 403, 'Permission denied'
+  end
   if params["item"] == "permission"
-    retval = remove_acl_permission(params["acl_perm_id"])
+    retval = remove_acl_permission(session, params["acl_perm_id"])
   elsif params["item"] == "usergroup"
-    retval = remove_acl_usergroup(params["role_id"],params["usergroup_id"])
+    retval = remove_acl_usergroup(
+      session, params["role_id"],params["usergroup_id"]
+    )
   else
     retval = "Error: Unknown removal request"
   end
@@ -1060,8 +1747,13 @@ def remove_acl_remote(params)
   end
 end
 
-def add_meta_attr_remote(params)
-  retval = add_meta_attr(params["res_id"], params["key"],params["value"])
+def add_meta_attr_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+  retval = add_meta_attr(
+    session, params["res_id"], params["key"],params["value"]
+  )
   if retval == 0
     return [200, "Successfully added meta attribute"]
   else
@@ -1069,11 +1761,16 @@ def add_meta_attr_remote(params)
   end
 end
 
-def add_constraint_remote(params)
+def add_constraint_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
   case params["c_type"]
   when "loc"
     retval, error = add_location_constraint(
-      params["res_id"], params["node_id"], params["score"]
+      session,
+      params["res_id"], params["node_id"], params["score"], params["force"],
+      !params['disable_autocorrect']
     )
   when "ord"
     resA = params["res_id"]
@@ -1086,7 +1783,9 @@ def add_constraint_remote(params)
     end
 
     retval, error = add_order_constraint(
-      resA, resB, actionA, actionB, params["score"], true, params["force"]
+      session,
+      resA, resB, actionA, actionB, params["score"], true, params["force"],
+      !params['disable_autocorrect']
     )
   when "col"
     resA = params["res_id"]
@@ -1094,14 +1793,15 @@ def add_constraint_remote(params)
     score = params["score"]
     if params["colocation_type"] == "apart"
       if score.length > 0 and score[0] != "-"
-      	score = "-" + score
+        score = "-" + score
       elsif score == ""
-      	score = "-INFINITY"
+        score = "-INFINITY"
       end
     end
 
     retval, error = add_colocation_constraint(
-      resA, resB, score, params["force"]
+      session,
+      resA, resB, score, params["force"], !params['disable_autocorrect']
     )
   else
     return [400, "Unknown constraint type: #{params['c_type']}"]
@@ -1114,10 +1814,15 @@ def add_constraint_remote(params)
   end
 end
 
-def add_constraint_rule_remote(params)
+def add_constraint_rule_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
   if params["c_type"] == "loc"
     retval, error = add_location_constraint_rule(
-      params["res_id"], params["rule"], params["score"], params["force"]
+      session,
+      params["res_id"], params["rule"], params["score"], params["force"],
+      !params['disable_autocorrect']
     )
   else
     return [400, "Unknown constraint type: #{params["c_type"]}"]
@@ -1130,11 +1835,15 @@ def add_constraint_rule_remote(params)
   end
 end
 
-def add_constraint_set_remote(params)
+def add_constraint_set_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
   case params["c_type"]
   when "ord"
     retval, error = add_order_set_constraint(
-      params["resources"].values, params["force"]
+      session,
+      params["resources"].values, params["force"], !params['disable_autocorrect']
     )
   else
     return [400, "Unknown constraint type: #{params["c_type"]}"]
@@ -1147,9 +1856,12 @@ def add_constraint_set_remote(params)
   end
 end
 
-def remove_constraint_remote(params)
+def remove_constraint_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
   if params[:constraint_id]
-    retval = remove_constraint(params[:constraint_id])
+    retval = remove_constraint(session, params[:constraint_id])
     if retval == 0
       return "Constraint #{params[:constraint_id]} removed"
     else
@@ -1160,9 +1872,12 @@ def remove_constraint_remote(params)
   end
 end
 
-def remove_constraint_rule_remote(params)
+def remove_constraint_rule_remote(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
   if params[:rule_id]
-    retval = remove_constraint_rule(params[:rule_id])
+    retval = remove_constraint_rule(session, params[:rule_id])
     if retval == 0
       return "Constraint rule #{params[:rule_id]} removed"
     else
@@ -1173,10 +1888,15 @@ def remove_constraint_rule_remote(params)
   end
 end
 
-def add_group(params)
+def add_group(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
   rg = params["resource_group"]
   resources = params["resources"]
-  output, errout, retval = run_cmd(PCS, "resource", "group", "add", rg, *(resources.split(" ")))
+  output, errout, retval = run_cmd(
+    session, PCS, "resource", "group", "add", rg, *(resources.split(" "))
+  )
   if retval == 0
     return 200
   else
@@ -1184,35 +1904,72 @@ def add_group(params)
   end
 end
 
-def update_cluster_settings(params)
+def update_cluster_settings(params, request, session)
   settings = params["config"]
   hidden_settings = params["hidden"]
-  output = ""
   hidden_settings.each{|name,val|
     found = false
     settings.each{|name2,val2|
       if name == name2
-	found = true
-	break
+        found = true
+        break
       end
     }
     if not found
       settings[name] = val
     end
   }
+  settings.each { |_, val| val.strip!() }
+
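+  # work out which settings actually changed: changing enable-acl needs GRANT
+  # permission, any other change needs WRITE permission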
+  binary_settings = []
+  changed_settings = []
+  old_settings = {}
+  getConfigOptions2(
+    PCSAuth.getSuperuserSession(), get_nodes().flatten()
+  ).values().flatten().each { |opt|
+    binary_settings << opt.configname if "check" == opt.type
+    # if we don't know current value of an option, consider it changed
+    next if opt.value.nil?
+    if "check" == opt.type
+      old_settings[opt.configname] = is_cib_true(opt.value)
+    else
+      old_settings[opt.configname] = opt.value
+    end
+  }
+  settings.each { |key, val|
+    new_val = binary_settings.include?(key) ? is_cib_true(val) : val
+    # if we don't know current value of an option, consider it changed
+    if (not old_settings.key?(key)) or (old_settings[key] != new_val)
+      changed_settings << key.downcase()
+    end
+  }
+  if changed_settings.include?('enable-acl')
+    if not allowed_for_local_cluster(session, Permissions::GRANT)
+      return 403, 'Permission denied'
+    end
+  end
+  if changed_settings.count { |x| x != 'enable-acl' } > 0
+    if not allowed_for_local_cluster(session, Permissions::WRITE)
+      return 403, 'Permission denied'
+    end
+  end
 
-  settings.each{|name,val|
+  changed_settings.each { |name|
+    val = settings[name]
     if name == "enable-acl"
-      run_cmd(PCS, "property", "set", name + "=" + val, "--force")
+      run_cmd(session, PCS, "property", "set", name + "=" + val, "--force")
     else
-      run_cmd(PCS, "property", "set", name + "=" + val)
+      run_cmd(session, PCS, "property", "set", name + "=" + val)
     end
   }
   return [200, "Update Successful"]
 end
 
-def cluster_destroy(params)
-  out, errout, retval = run_cmd(PCS, "cluster", "destroy")
+def cluster_destroy(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, 'Permission denied'
+  end
+  out, errout, retval = run_cmd(session, PCS, "cluster", "destroy")
   if retval == 0
     return [200, "Successfully destroyed cluster"]
   else
@@ -1220,7 +1977,10 @@ def cluster_destroy(params)
   end
 end
 
-def get_wizard(params)
+def get_wizard(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::READ)
+    return 403, 'Permission denied'
+  end
   wizard = PCSDWizard.getWizard(params["wizard"])
   if wizard != nil
     return erb wizard.collection_page
@@ -1229,7 +1989,10 @@ def get_wizard(params)
   end
 end
 
-def wizard_submit(params)
+def wizard_submit(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
   wizard = PCSDWizard.getWizard(params["wizard"])
   if wizard != nil
     return erb wizard.process_responses(params)
@@ -1239,67 +2002,321 @@ def wizard_submit(params)
 
 end
 
-def get_local_node_id
-  if ISRHEL6
-    out, errout, retval = run_cmd(COROSYNC_CMAPCTL, "cluster.cman")
-    if retval != 0
-      return ""
+def auth_gui_against_nodes(params, request, session)
+  node_auth_error = {}
+  new_tokens = {}
+  threads = []
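+  # each '<nodename>-pass' parameter carries the password for that node; all nodes are authenticated in parallel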
+  params.each { |node|
+    threads << Thread.new {
+      if node[0].end_with?("-pass") and node[0].length > 5
+        nodename = node[0][0..-6]
+        if params.has_key?("all")
+          pass = params["pass-all"]
+        else
+          pass = node[1]
+        end
+        data = {
+          'node-0' => nodename,
+          'username' => SUPERUSER,
+          'password' => pass,
+          'force' => 1,
+        }
+        node_auth_error[nodename] = 1
+        code, response = send_request(session, nodename, 'auth', true, data)
+        if 200 == code
+          token = response.strip
+          if not token.empty?
+            new_tokens[nodename] = token
+            node_auth_error[nodename] = 0
+          end
+        end
+      end
+    }
+  }
+  threads.each { |t| t.join }
+
+  if not new_tokens.empty?
+    cluster_nodes = get_corosync_nodes()
+    tokens_cfg = Cfgsync::PcsdTokens.from_file('')
+    sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
+      tokens_cfg, new_tokens, cluster_nodes, $cluster_name
+    )
+  end
+
+  return [200, JSON.generate({'node_auth_error' => node_auth_error})]
+end
+
+# not used anymore, left here for backward compatibility reasons
+def get_tokens(params, request, session)
+  # pcsd runs as root thus always returns hacluster's tokens
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, 'Permission denied'
+  end
+  return [200, JSON.generate(read_tokens)]
+end
+
+def get_cluster_tokens(params, request, session)
+  # pcsd runs as root thus always returns hacluster's tokens
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, "Permission denied"
+  end
+  on, off = get_nodes
+  nodes = on + off
+  nodes.uniq!
+  return [200, JSON.generate(get_tokens_of_nodes(nodes))]
+end
+
+def save_tokens(params, request, session)
+  # pcsd runs as root thus always returns hacluster's tokens
+  if not allowed_for_local_cluster(session, Permissions::FULL)
+    return 403, "Permission denied"
+  end
+
+  new_tokens = {}
+
+  params.each{|nodes|
+    if nodes[0].start_with?"node:" and nodes[0].length > 5
+      node = nodes[0][5..-1]
+      token = nodes[1]
+      new_tokens[node] = token
     end
-    match = /cluster\.nodename=(.*)/.match(out.join("\n"))
-    if not match
-      return ""
+  }
+
+  tokens_cfg = Cfgsync::PcsdTokens.from_file('')
+  sync_successful, sync_responses = Cfgsync::save_sync_new_tokens(
+    tokens_cfg, new_tokens, get_corosync_nodes(), $cluster_name
+  )
+
+  if sync_successful
+    return [200, JSON.generate(read_tokens())]
+  else
+    return [400, "Cannot update tokenfile."]
+  end
+end
+
+def add_node_to_cluster(params, request, session)
+  clustername = params["clustername"]
+  new_node = params["new_nodename"]
+
+  if clustername == $cluster_name
+    if not allowed_for_local_cluster(session, Permissions::FULL)
+      return 403, 'Permission denied'
     end
-    local_node_name = match[1]
-    out, errout, retval = run_cmd(CMAN_TOOL, "nodes", "-F", "id", "-n", local_node_name)
-    if retval != 0
-      return ""
+  end
+
+  tokens = read_tokens
+
+  if not tokens.include? new_node
+    return [400, "New node is not authenticated."]
+  end
+
+  # Save the new node's token on all nodes in the cluster the new node is being
+  # added to. Send the token to one node and let the cluster nodes synchronize
+  # it by themselves.
+  token_data = {"node:#{new_node}" => tokens[new_node]}
+  retval, out = send_cluster_request_with_token(
+    # new node doesn't have config with permissions yet
+    PCSAuth.getSuperuserSession(), clustername, '/save_tokens', true, token_data
+  )
+  # If the cluster runs an old pcsd which doesn't support /save_tokens,
+  # ignore a 404 so that the node can still be added.
+  if retval != 404 and retval != 200
+    return [400, 'Failed to save the token of the new node in target cluster.']
+  end
+
+  retval, out = send_cluster_request_with_token(
+    session, clustername, "/add_node_all", true, params
+  )
+  if 403 == retval
+    return [retval, out]
+  end
+  if retval != 200
+    return [400, "Failed to add new node '#{new_node}' into cluster '#{clustername}': #{out}"]
+  end
+
+  return [200, "Node added successfully."]
+end
+
+def fix_auth_of_cluster(params, request, session)
+  if not params["clustername"]
+    return [400, "cluster name not defined"]
+  end
+
+  clustername = params["clustername"]
+  nodes = get_cluster_nodes(clustername)
+  tokens_data = add_prefix_to_keys(get_tokens_of_nodes(nodes), "node:")
+
+  retval, out = send_cluster_request_with_token(
+    PCSAuth.getSuperuserSession(), clustername, "/save_tokens", true,
+    tokens_data, true
+  )
+  if retval == 404
+    return [400, "Old version of PCS/PCSD is running on cluster nodes. Fixing authentication is not supported. Use 'pcs cluster auth' command to authenticate the nodes."]
+  elsif retval != 200
+    return [400, "Authentication failed."]
+  end
+  return [200, "Auhentication of nodes in cluster should be fixed."]
+end
+
+def resource_master(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+
+  unless params[:resource_id]
+    return [400, 'resource_id has to be specified.']
+  end
+  _, stderr, retval = run_cmd(
+    session, PCS, 'resource', 'master', params[:resource_id]
+  )
+  if retval != 0
+    return [400, 'Unable to create master/slave resource from ' +
+      "'#{params[:resource_id]}': #{stderr.join('')}"
+    ]
+  end
+  return 200
+end
+
+def resource_change_group(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+
+  if params[:resource_id].nil? or params[:group_id].nil?
+    return [400, 'resource_id and group_id have to be specified.']
+  end
+  if params[:group_id].empty?
+    if params[:old_group_id]
+      _, stderr, retval = run_cmd(
+        session, PCS, 'resource', 'group', 'remove', params[:old_group_id],
+        params[:resource_id]
+      )
+      if retval != 0
+        return [400, "Unable to remove resource '#{params[:resource_id]}' " +
+          "from group '#{params[:old_group_id]}': #{stderr.join('')}"
+        ]
+      end
     end
-    return out[0].strip()
+    return 200
   end
-  out, errout, retval = run_cmd(COROSYNC_CMAPCTL, "-g", "runtime.votequorum.this_node_id")
+  _, stderr, retval = run_cmd(
+    session,
+    PCS, 'resource', 'group', 'add', params[:group_id], params[:resource_id]
+  )
   if retval != 0
-    return ""
-  else
-    return out[0].split(/ = /)[1].strip()
+    return [400, "Unable to add resource '#{params[:resource_id]}' to " +
+      "group '#{params[:group_id]}': #{stderr.join('')}"
+    ]
   end
+  return 200
 end
 
-def need_ring1_address?
-  out, errout, retval = run_cmd(COROSYNC_CMAPCTL)
+def resource_ungroup(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+
+  unless params[:group_id]
+    return [400, 'group_id has to be specified.']
+  end
+
+  _, stderr, retval = run_cmd(
+    session, PCS, 'resource', 'ungroup', params[:group_id]
+  )
   if retval != 0
-    return false
-  else
-    udpu_transport = false
-    rrp = false
-    out.each { |line|
-      # support both corosync-objctl and corosync-cmapctl format
-      if /^\s*totem\.transport(\s+.*)?=\s*udpu$/.match(line)
-        udpu_transport = true
-      elsif /^\s*totem\.rrp_mode(\s+.*)?=\s*(passive|active)$/.match(line)
-        rrp = true
-      end
-    }
-    # on rhel6 ring1 address is required regardless of transport
-    # it has to be added to cluster.conf in order to set up ring1
-    # in corosync by cman
-    return ((ISRHEL6 and rrp) or (rrp and udpu_transport))
+    return [400, 'Unable to ungroup group ' +
+      "'#{params[:group_id]}': #{stderr.join('')}"
+    ]
   end
+  return 200
 end
 
-def is_cman_with_udpu_transport?
-  if not ISRHEL6
-    return false
+def resource_clone(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
   end
-  begin
-    cluster_conf = File.open(CLUSTER_CONF).read
-    conf_dom = REXML::Document.new(cluster_conf)
-    conf_dom.elements.each("cluster/cman") { |elem|
-      if elem.attributes["transport"].downcase == "udpu"
-        return true
-      end
-    }
-  rescue
-    return false
+
+  unless params[:resource_id]
+    return [400, 'resource_id has to be specified.']
   end
-  return false
+
+  _, stderr, retval = run_cmd(
+    session, PCS, 'resource', 'clone', params[:resource_id]
+  )
+  if retval != 0
+    return [400, 'Unable to create clone resource from ' +
+      "'#{params[:resource_id]}': #{stderr.join('')}"
+    ]
+  end
+  return 200
+end
+
+def resource_unclone(params, request, session)
+  if not allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+
+  unless params[:resource_id]
+    return [400, 'resource_id has to be specified.']
+  end
+
+  _, stderr, retval = run_cmd(
+    session, PCS, 'resource', 'unclone', params[:resource_id]
+  )
+  if retval != 0
+    return [400, 'Unable to unclone ' +
+      "'#{params[:resource_id]}': #{stderr.join('')}"
+    ]
+  end
+  return 200
+end
+
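+# Set (or clear, when no value is given) a utilization attribute of a
+# resource by running 'pcs resource utilization'; requires WRITE permission.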
+def set_resource_utilization(params, request, session)
+  unless allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+
+  unless params[:resource_id] and params[:name]
+    return 400, 'resource_id and name are required'
+  end
+
+  res_id = params[:resource_id]
+  name = params[:name]
+  value = params[:value] || ''
+
+  _, stderr, retval = run_cmd(
+    session, PCS, 'resource', 'utilization', res_id, "#{name}=#{value}"
+  )
+
+  if retval != 0
+    return [400, "Unable to set utilization '#{name}=#{value}' for " +
+      "resource '#{res_id}': #{stderr.join('')}"
+    ]
+  end
+  return 200
+end
+
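+# Set (or clear, when no value is given) a utilization attribute of a node
+# by running 'pcs node utilization'; requires WRITE permission.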
+def set_node_utilization(params, request, session)
+  unless allowed_for_local_cluster(session, Permissions::WRITE)
+    return 403, 'Permission denied'
+  end
+
+  unless params[:node] and params[:name]
+    return 400, 'node and name are required'
+  end
+
+  node = params[:node]
+  name = params[:name]
+  value = params[:value] || ''
+
+  _, stderr, retval = run_cmd(
+    session, PCS, 'node', 'utilization', node, "#{name}=#{value}"
+  )
+
+  if retval != 0
+    return [400, "Unable to set utilization '#{name}=#{value}' for node " +
+      "'#{res_id}': #{stderr.join('')}"
+    ]
+  end
+  return 200
 end
diff --git a/pcsd/resource.rb b/pcsd/resource.rb
index 1577e58..aaf61c9 100644
--- a/pcsd/resource.rb
+++ b/pcsd/resource.rb
@@ -1,7 +1,11 @@
-require 'pp'
-
-def getResourcesGroups(get_fence_devices = false, get_all_options = false)
-  stdout, stderror, retval = run_cmd("crm_mon", "--one-shot", "-r", "--as-xml")
+require 'pathname'
+
+def getResourcesGroups(session, get_fence_devices = false, get_all_options = false,
+  get_operations=false
+)
+  stdout, stderror, retval = run_cmd(
+    session, CRM_MON, "--one-shot", "-r", "--as-xml"
+  )
   if retval != 0
     return [],[], retval
   end
@@ -31,7 +35,7 @@ def getResourcesGroups(get_fence_devices = false, get_all_options = false)
     else
       ms = false
       if e.parent.attributes["multi_state"] == "true"
-      	ms = true
+        ms = true
       end
       !get_fence_devices && resource_list.push(Resource.new(e, nil, !ms, ms))
     end
@@ -42,7 +46,7 @@ def getResourcesGroups(get_fence_devices = false, get_all_options = false)
     else
       ms = false
       if e.parent.parent.attributes["multi_state"] == "true"
-      	ms = true
+        ms = true
       end
       !get_fence_devices && resource_list.push(Resource.new(e,e.parent.parent.attributes["id"] + "/" + e.parent.attributes["id"],!ms, ms))
     end
@@ -52,43 +56,59 @@ def getResourcesGroups(get_fence_devices = false, get_all_options = false)
     group_list.push(e.attributes["id"])
   end
 
+  resource_list = resource_list.select { |x| not x.orphaned }
   resource_list = resource_list.sort_by{|a| (a.group ? "1" : "0").to_s + a.group.to_s + "-" +  a.id}
 
-  if get_all_options
-    stdout, stderror, retval = run_cmd("cibadmin", "-Q", "-l")
+  if get_all_options or get_operations
+    stdout, stderror, retval = run_cmd(session, "cibadmin", "-Q", "-l")
     cib_output = stdout
     resources_inst_attr_map = {}
     resources_meta_attr_map = {}
+    resources_operation_map = {}
     begin
       doc = REXML::Document.new(cib_output.join("\n"))
-
-      doc.elements.each('//primitive') do |r|
-	resources_inst_attr_map[r.attributes["id"]] = {}
-	resources_meta_attr_map[r.attributes["id"]] = {}
-	r.each_recursive do |ia|
-	  if ia.node_type == :element and ia.name == "nvpair"
-	    if ia.parent.name == "instance_attributes"
-	      resources_inst_attr_map[r.attributes["id"]][ia.attributes["name"]] = ia.attributes["value"]
-	    elsif ia.parent.name == "meta_attributes"
-	      resources_meta_attr_map[r.attributes["id"]][ia.attributes["name"]] = [ia.attributes["id"],ia.attributes["value"],ia.parent.parent.attributes["id"]]
-	    end
-	  end
-	  if ["group","clone","master"].include?(r.parent.name)
-	    r.parent.elements.each('./meta_attributes/nvpair') do |ma|
-	      resources_meta_attr_map[r.attributes["id"]][ma.attributes["name"]] ||= []
-	      resources_meta_attr_map[r.attributes["id"]][ma.attributes["name"]] = [ma.attributes["id"],ma.attributes["value"],ma.parent.parent.attributes["id"]]
+      if get_all_options
+        doc.elements.each('//primitive') do |r|
+          resources_inst_attr_map[r.attributes["id"]] = {}
+          resources_meta_attr_map[r.attributes["id"]] = {}
+          r.each_recursive do |ia|
+            if ia.node_type == :element and ia.name == "nvpair"
+              if ia.parent.name == "instance_attributes"
+                resources_inst_attr_map[r.attributes["id"]][ia.attributes["name"]] = ia.attributes["value"]
+              elsif ia.parent.name == "meta_attributes"
+                resources_meta_attr_map[r.attributes["id"]][ia.attributes["name"]] = [ia.attributes["id"],ia.attributes["value"],ia.parent.parent.attributes["id"]]
+              end
+            end
+            if ["group","clone","master"].include?(r.parent.name)
+              r.parent.elements.each('./meta_attributes/nvpair') do |ma|
+                resources_meta_attr_map[r.attributes["id"]][ma.attributes["name"]] ||= []
+                resources_meta_attr_map[r.attributes["id"]][ma.attributes["name"]] = [ma.attributes["id"],ma.attributes["value"],ma.parent.parent.attributes["id"]]
+              end
             end
           end
-
-	end
-
+        end
+        resource_list.each {|r|
+          r.options = resources_inst_attr_map[r.id]
+          r.instance_attr = resources_inst_attr_map[r.id]
+          r.meta_attr = resources_meta_attr_map[r.id]
+        }
       end
 
-      resource_list.each {|r|
-	r.options = resources_inst_attr_map[r.id]
-	r.instance_attr = resources_inst_attr_map[r.id]
-	r.meta_attr = resources_meta_attr_map[r.id]
-      }
+      if get_operations
+        doc.elements.each('//lrm_rsc_op') { |rsc_op|
+          resources_operation_map[rsc_op.parent.attributes['id']] ||= []
+          resources_operation_map[rsc_op.parent.attributes['id']] << (
+            ResourceOperation.new(rsc_op)
+          )
+        }
+        resource_list.each {|r|
+          if resources_operation_map[r.id]
+            r.operations = resources_operation_map[r.id].sort { |a, b|
+              a.call_id <=> b.call_id
+            }
+          end
+        }
+      end
     rescue REXML::ParseException
       $logger.info("ERROR: Parse Exception parsing cibadmin -Q")
     end
@@ -97,38 +117,11 @@ def getResourcesGroups(get_fence_devices = false, get_all_options = false)
   [resource_list, group_list, 0]
 end
 
-def getResourceOptions(resource_id,stonith=false)
-  # Strip ':' from resource name (for clones & master/slave)
-  resource_id = resource_id.sub(/(.*):.*/,'\1')
-
-  ret = {}
-  if stonith
-    resource_options = `#{PCS} stonith show #{resource_id}`
-  else
-    resource_options = `#{PCS} resource show #{resource_id}`
-  end
-  resource_options.each_line { |line|
-    keyval = line.strip.split(/: /,2)
-    if keyval[0] == "Attributes" then
-      options = keyval[1].split(/ /)
-      options.each {|opt|
-      	kv = opt.split(/=/)
-      	ret[kv[0]] = kv[1]
-      }
-    end
-  }
-  return ret
-end
-
-def getAllConstraints()
-  stdout, stderror, retval = run_cmd("cibadmin", "-Q", "-l", "--xpath", "//constraints")
+def getAllConstraints(constraints_dom)
   constraints = {}
-  if retval != 0
-    return {}
-  end
-  doc = REXML::Document.new(stdout.join("\n"))
-  constraints = {}
-  doc.elements.each('constraints/*') do |e|
+  doc = constraints_dom
+
+  doc.elements.each() { |e|
     if e.name == 'rsc_location' and e.has_elements?()
       rule_export = RuleToExpression.new()
       e.elements.each('rule') { |rule|
@@ -170,123 +163,49 @@ def getAllConstraints()
         constraints[e.name] = [e.attributes]
       end
     end
-  end
+  }
   return constraints
 end
 
-# Returns two arrays, one that lists resources that start before
-# one that lists resources that start after
-def getOrderingConstraints(resource_id)
-  ordering_constraints = `#{PCS} constraint order show all`
-  before = []
-  after = []
-  ordering_constraints.each_line { |line|
-    if line.start_with?("Ordering Constraints:")
-      next
-    end
-    line.strip!
-    sline = line.split(/ /,6)
-    if (sline[0] == resource_id)
-      after << [sline[-1].to_s[4..-2],sline[2]]
-    end
-    if (sline[2] == resource_id)
-      before << [sline[-1].to_s[4..-2],sline[0]]
-    end
-  }
-  return before,after
-end
+def getResourceMetadata(session, resourcepath)
+  options_required = {}
+  options_optional = {}
+  long_desc = ""
+  short_desc = ""
 
-# Returns two arrays, one that lists nodes that can run resource
-# one that lists nodes that cannot
-def getLocationConstraints(resource_id)
-  location_constraints = `#{PCS} constraint location show all`
-  enabled_nodes = {}
-  disabled_nodes = {}
-  inResource = false
-  location_constraints.each_line { |line|
-    line.strip!
-    next if line.start_with?("Location Constraints:")
-    if line.start_with?("Resource:")
-      if line == "Resource: " + resource_id
-	inResource = true
-      else
-	inResource = false
-      end
-      next
-    end
-    next if !inResource
-    if line.start_with?("Enabled on:")
-      prev = nil
-      line.split(/: /,2)[1].split(/ /).each { |n|
-	if n.start_with?("(id:")
-	  enabled_nodes[prev][0] = n[4..-2]
-	elsif n.start_with?("(")
-	  enabled_nodes[prev][1] = n[1..-2]
-	else
-	  enabled_nodes[n] = []
-	  prev = n
-	end
-      }
-    end
-    if line.start_with?("Disabled on:")
-      prev = nil
-      line.split(/: /,2)[1].split(/ /).each { |n|
-	if n.start_with?("(id:")
-	  disabled_nodes[prev][0] = n[4..-2]
-	elsif n.start_with?("(")
-	  disabled_nodes[prev][1] = n[1..-2]
-	else
-	  disabled_nodes[n] = []
-	  prev = n
-	end
-      }
-    end
-  }
-  return enabled_nodes,disabled_nodes
-end
+  resourcepath = Pathname.new(resourcepath).cleanpath.to_s
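+  # Only read metadata from agents located in the known agent directories,
+  # refusing arbitrary paths supplied by the caller.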
+  resource_dirs = [
+    HEARTBEAT_AGENTS_DIR, PACEMAKER_AGENTS_DIR, NAGIOS_METADATA_DIR,
+  ]
+  if not resource_dirs.any? { |allowed| resourcepath.start_with?(allowed) }
+    $logger.error(
+      "Unable to get metadata of resource agent '#{resourcepath}': " +
+      'path not allowed'
+    )
+    return [options_required, options_optional, [short_desc, long_desc]]
+  end
 
-# Returns two arrays, one that lists resources that should be together
-# one that lists resources that should be apart
-def getColocationConstraints(resource_id)
-  colocation_constraints = `#{PCS} constraint colocation show all`
-  together = []
-  apart = []
-  colocation_constraints.each_line { |line|
-    if line.start_with?("Colocation Constraints:")
-      next
-    end
-    line.strip!
-    sline = line.split(/ /,5)
-    score = []
-    score[0] = sline[4][4..-2]
-    score[1] = sline[3][1..-2]
-    if (sline[0] == resource_id)
-      if score[1] == "INFINITY"  or (score[1] != "-INFINITY" and score[1].to_i >= 0)
-	together << [sline[2],score]
-      else
-	apart << [sline[2],score]
-      end
+  if resourcepath.end_with?('.xml')
+    begin
+      metadata = IO.read(resourcepath)
+    rescue
+      metadata = ""
     end
+  else
+    ENV['OCF_ROOT'] = OCF_ROOT
+    stdout, stderr, retval = run_cmd(session, resourcepath, 'meta-data')
+    metadata = stdout.join
+  end
 
-    if (sline[2] == resource_id)
-      if score[1] == "INFINITY"  or (score[1] != "-INFINITY" and score[1].to_i >= 0)
-	together << [sline[0],score]
-      else
-	apart << [sline[0],score]
-      end
-    end
-  }
-  return together,apart
-end
+  begin
+    doc = REXML::Document.new(metadata)
+  rescue REXML::ParseException => e
+    $logger.error(
+      "Unable to parse metadata of resource agent '#{resourcepath}': #{e}"
+    )
+    return [options_required, options_optional, [short_desc, long_desc]]
+  end
 
-def getResourceMetadata(resourcepath)
-  ENV['OCF_ROOT'] = OCF_ROOT
-  metadata = `#{resourcepath} meta-data`
-  doc = REXML::Document.new(metadata)
-  options_required = {}
-  options_optional = {}
-  long_desc = ""
-  short_desc = ""
   doc.elements.each('resource-agent/longdesc') {|ld|
     long_desc = ld.text ? ld.text.strip : ld.text
   }
@@ -298,62 +217,52 @@ def getResourceMetadata(resourcepath)
     temp_array = []
     if param.attributes["required"] == "1"
       if param.elements["shortdesc"] and param.elements["shortdesc"].text
-	temp_array << param.elements["shortdesc"].text.strip
+        temp_array << param.elements["shortdesc"].text.strip
       else
-      	temp_array << ""
+        temp_array << ""
       end
       if param.elements["longdesc"] and param.elements["longdesc"].text
-	temp_array << param.elements["longdesc"].text.strip
+        temp_array << param.elements["longdesc"].text.strip
       else
-      	temp_array << ""
+        temp_array << ""
       end
       options_required[param.attributes["name"]] = temp_array
     else
       if param.elements["shortdesc"] and param.elements["shortdesc"].text
-	temp_array << param.elements["shortdesc"].text.strip
+        temp_array << param.elements["shortdesc"].text.strip
       else
-      	temp_array << ""
+        temp_array << ""
       end
       if param.elements["longdesc"] and param.elements["longdesc"].text
-	temp_array << param.elements["longdesc"].text.strip
+        temp_array << param.elements["longdesc"].text.strip
       else
-      	temp_array << ""
+        temp_array << ""
       end
       options_optional[param.attributes["name"]] = temp_array
     end
   }
-  [options_required, options_optional, [short_desc,long_desc]]
+  [options_required, options_optional, [short_desc, long_desc]]
 end
 
-def getResourceAgents(resource_agent = nil)
+def getResourceAgents(session)
   resource_agent_list = {}
-  stdout, stderr, retval = run_cmd(PCS, "resource", "list", "--nodesc")
+  stdout, stderr, retval = run_cmd(session, PCS, "resource", "list", "--nodesc")
   if retval != 0
-    logger.error("Error running 'pcs resource list --nodesc")
-    logger.error(stdout + stderr)
+    $logger.error("Error running 'pcs resource list --nodesc")
+    $logger.error(stdout + stderr)
     return {}
   end
 
   agents = stdout
-
   agents.each { |a|
     ra = ResourceAgent.new
     ra.name = a.chomp
-
-    if resource_agent and (a.start_with?("ocf:heartbeat:") or a.start_with?("ocf:pacemaker:"))
-      split_agent = ra.name.split(/:/)
-      path = OCF_ROOT + '/resource.d/' + split_agent[1] + "/" + split_agent[2]
-      required_options, optional_options, resource_info = getResourceMetadata(path)
-      ra.required_options = required_options
-      ra.optional_options = optional_options
-      ra.info = resource_info
-    end
     resource_agent_list[ra.name] = ra
   }
-  resource_agent_list
+  return resource_agent_list
 end
 
-class Resource 
+class Resource
   attr_accessor :id, :name, :type, :agent, :agentname, :role, :active,
     :orphaned, :managed, :failed, :failure_ignored, :nodes, :location,
     :options, :group, :clone, :stonith, :ms, :operations,
@@ -365,7 +274,7 @@ class Resource
     @active = e.attributes["active"] == "true" ? true : false
     @orphaned = e.attributes["orphaned"] == "true" ? true : false
     @failed = e.attributes["failed"] == "true" ? true : false
-    @active = e.attributes["active"] == "true" ? true : false
+    @role = e.attributes['role']
     @nodes = []
     # Strip ':' from group name (for clones & master/slave created from a group)
     @group = group ? group.sub(/(.*):.*/, '\1') : group
@@ -377,7 +286,7 @@ class Resource
     @options = {}
     @instance_attr = {}
     @meta_attr = {}
-    @operations = {}
+    @operations = []
     e.elements.each do |n| 
       node = Node.new
       node.name = n.attributes["name"]
@@ -395,7 +304,8 @@ class Resource
   end
 
   def disabled
-    if meta_attr and meta_attr["target-role"] and meta_attr["target-role"] == "Stopped"
+    return false if @stonith
+    if meta_attr and meta_attr["target-role"] and meta_attr["target-role"][1] == "Stopped"
       return true
     else
       return false
@@ -403,6 +313,7 @@ class Resource
   end
 end
 
+
 class ResourceAgent
   attr_accessor :name, :resource_class, :required_options, :optional_options, :info
   def initialize(name=nil, required_options={}, optional_options={}, resource_class=nil)
@@ -447,8 +358,50 @@ class ResourceAgent
   end
 end
 
-class RuleToExpression
 
+class ResourceOperation
+  attr_accessor :call_id, :crm_debug_origin, :crm_feature_set, :exec_time,
+    :exit_reason, :id, :interval, :last_rc_change, :last_run, :on_node,
+    :op_digest, :operation, :operation_key, :op_force_restart,
+    :op_restart_digest, :op_status, :queue_time, :rc_code, :transition_key,
+    :transition_magic
+  def initialize(op_element)
+    @call_id = op_element.attributes['call-id'].to_i
+    @crm_debug_origin = op_element.attributes['crm-debug-origin']
+    @crm_feature_set = op_element.attributes['crm_feature_set']
+    @exec_time = op_element.attributes['exec-time'].to_i
+    @exit_reason = op_element.attributes['exit-reason']
+    @id = op_element.attributes['id']
+    @interval = op_element.attributes['interval'].to_i
+    @last_rc_change = op_element.attributes['last-rc-change'].to_i
+    @last_run = op_element.attributes['last-run'].to_i
+    @on_node = op_element.attributes['on_node']
+    @op_digest = op_element.attributes['op-digest']
+    @operation_key = op_element.attributes['operation_key']
+    @operation = op_element.attributes['operation']
+    @op_force_restart = op_element.attributes['op-force-restart']
+    @op_restart_digest = op_element.attributes['op-restart-digest']
+    @op_status = op_element.attributes['op-status'].to_i
+    @queue_time = op_element.attributes['queue-time'].to_i
+    @rc_code = op_element.attributes['rc-code'].to_i
+    @transition_key = op_element.attributes['transition-key']
+    @transition_magic = op_element.attributes['transition-magic']
+
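+    # The on_node attribute may be missing; fall back to the uname of the
+    # enclosing node_state element.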
+    if not @on_node
+      elem = op_element.parent
+      while elem
+        if elem.name == 'node_state'
+          @on_node = elem.attributes['uname']
+          break
+        end
+        elem = elem.parent
+      end
+    end
+  end
+end
+
+
+class RuleToExpression
   def export(rule)
     boolean_op = 'and'
     if rule.attributes.key?('boolean-op')
@@ -523,5 +476,4 @@ class RuleToExpression
     end
     return part_list.join(' ')
   end
-
 end
diff --git a/pcsd/settings.rb b/pcsd/settings.rb
new file mode 100644
index 0000000..ff056a4
--- /dev/null
+++ b/pcsd/settings.rb
@@ -0,0 +1,24 @@
+PCSD_EXEC_LOCATION = '/usr/lib/pcsd/'
+PCSD_VAR_LOCATION = '/var/lib/pcsd/'
+
+CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt'
+KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
+COOKIE_FILE = PCSD_VAR_LOCATION + 'pcsd.cookiesecret'
+
+OCF_ROOT = "/usr/lib/ocf"
+HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/"
+PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/"
+NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/'
+PENGINE = "/usr/libexec/pacemaker/pengine"
+CIB_BINARY = '/usr/libexec/pacemaker/cib'
+CRM_MON = "/usr/sbin/crm_mon"
+CRM_NODE = "/usr/sbin/crm_node"
+CRM_ATTRIBUTE = "/usr/sbin/crm_attribute"
+COROSYNC_BINARIES = "/usr/sbin/"
+CMAN_TOOL = "/usr/sbin/cman_tool"
+PACEMAKERD = "/usr/sbin/pacemakerd"
+CIBADMIN = "/usr/sbin/cibadmin"
+
+SUPERUSER = 'hacluster'
+ADMIN_GROUP = 'haclient'
+$user_pass_file = "pcs_users.conf"
diff --git a/pcsd/settings.rb.i386-linux-gnu.debian b/pcsd/settings.rb.i386-linux-gnu.debian
new file mode 100644
index 0000000..73d2c80
--- /dev/null
+++ b/pcsd/settings.rb.i386-linux-gnu.debian
@@ -0,0 +1,24 @@
+PCSD_EXEC_LOCATION = '/usr/share/pcsd/'
+PCSD_VAR_LOCATION = '/var/lib/pcsd/'
+
+CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt'
+KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
+COOKIE_FILE = PCSD_VAR_LOCATION + 'pcsd.cookiesecret'
+
+OCF_ROOT = "/usr/lib/ocf"
+HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/"
+PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/"
+NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/'
+PENGINE = "/usr/lib/i386-linux-gnu/pacemaker/pengine"
+CIB_BINARY = '/usr/lib/i386-linux-gnu/pacemaker/cib'
+CRM_MON = "/usr/sbin/crm_mon"
+CRM_NODE = "/usr/sbin/crm_node"
+CRM_ATTRIBUTE = "/usr/sbin/crm_attribute"
+COROSYNC_BINARIES = "/usr/sbin/"
+CMAN_TOOL = "/usr/sbin/cman_tool"
+PACEMAKERD = "/usr/sbin/pacemakerd"
+CIBADMIN = "/usr/sbin/cibadmin"
+
+SUPERUSER = 'hacluster'
+ADMIN_GROUP = 'haclient'
+$user_pass_file = "pcs_users.conf"
diff --git a/pcsd/settings.rb.x86_64-linux-gnu.debian b/pcsd/settings.rb.x86_64-linux-gnu.debian
new file mode 100644
index 0000000..e67ed54
--- /dev/null
+++ b/pcsd/settings.rb.x86_64-linux-gnu.debian
@@ -0,0 +1,24 @@
+PCSD_EXEC_LOCATION = '/usr/share/pcsd/'
+PCSD_VAR_LOCATION = '/var/lib/pcsd/'
+
+CRT_FILE = PCSD_VAR_LOCATION + 'pcsd.crt'
+KEY_FILE = PCSD_VAR_LOCATION + 'pcsd.key'
+COOKIE_FILE = PCSD_VAR_LOCATION + 'pcsd.cookiesecret'
+
+OCF_ROOT = "/usr/lib/ocf"
+HEARTBEAT_AGENTS_DIR = "/usr/lib/ocf/resource.d/heartbeat/"
+PACEMAKER_AGENTS_DIR = "/usr/lib/ocf/resource.d/pacemaker/"
+NAGIOS_METADATA_DIR = '/usr/share/pacemaker/nagios/plugins-metadata/'
+PENGINE = "/usr/lib/x86_64-linux-gnu/pacemaker/pengine"
+CIB_BINARY = '/usr/lib/x86_64-linux-gnu/pacemaker/cib'
+CRM_MON = "/usr/sbin/crm_mon"
+CRM_NODE = "/usr/sbin/crm_node"
+CRM_ATTRIBUTE = "/usr/sbin/crm_attribute"
+COROSYNC_BINARIES = "/usr/sbin/"
+CMAN_TOOL = "/usr/sbin/cman_tool"
+PACEMAKERD = "/usr/sbin/pacemakerd"
+CIBADMIN = "/usr/sbin/cibadmin"
+
+SUPERUSER = 'hacluster'
+ADMIN_GROUP = 'haclient'
+$user_pass_file = "pcs_users.conf"
diff --git a/pcsd/ssl.rb b/pcsd/ssl.rb
index 1001755..2858574 100644
--- a/pcsd/ssl.rb
+++ b/pcsd/ssl.rb
@@ -2,38 +2,15 @@ require 'rubygems'
 require 'webrick'
 require 'webrick/https'
 require 'openssl'
-require 'logger'
 require 'rack'
 
-def get_rhel_version()
-  if File.exists?('/etc/system-release')
-    release = File.open('/etc/system-release').read
-    match = /(\d+)\.(\d+)/.match(release)
-    if match
-      return match[1, 2].collect{ | x | x.to_i}
-    end
-  end
-  return nil
-end
-
-def is_rhel6()
-  version = get_rhel_version()
-  return (version and version[0] == 6)
-end
-
-def is_systemctl()
-  if File.exist?('/usr/bin/systemctl')
-    return true
-  else
-    return false
-  end
-end
+require 'bootstrap.rb'
+require 'pcs.rb'
 
-CRT_FILE = "/var/lib/pcsd/pcsd.crt"
-KEY_FILE = "/var/lib/pcsd/pcsd.key"
 server_name = WEBrick::Utils::getservername
+$logger = configure_logger('/var/log/pcsd/pcsd.log')
 
-if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE)
+def generate_cert_key_pair(server_name)
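+  # Build a self-signed certificate (2048-bit RSA, SHA-256, valid 10 years)
+  # for the given server name.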
   name = "/C=US/ST=MN/L=Minneapolis/O=pcsd/OU=pcsd/CN=#{server_name}"
   ca   = OpenSSL::X509::Name.parse(name)
   key = OpenSSL::PKey::RSA.new(2048)
@@ -46,42 +23,65 @@ if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE)
   crt.not_before = Time.now
   crt.not_after  = Time.now + 10 * 365 * 24 * 60 * 60 # 10 year
   crt.sign(key, OpenSSL::Digest::SHA256.new)
+  return crt, key
+end
 
+if not File.exists?(CRT_FILE) or not File.exists?(KEY_FILE)
+  crt, key = generate_cert_key_pair(server_name)
   File.open(CRT_FILE, 'w',0700) {|f| f.write(crt)}
   File.open(KEY_FILE, 'w',0700) {|f| f.write(key)}
+else
+  crt, key = nil, nil
+  begin
+    crt = File.read(CRT_FILE)
+    key = File.read(KEY_FILE)
+  rescue => e
+    $logger.error "Unable to read certificate or key: #{e}"
+  end
+  crt_errors = verify_cert_key_pair(crt, key)
+  if crt_errors and not crt_errors.empty?
+    crt_errors.each { |err| $logger.error err }
+    $logger.error "Invalid certificate and/or key, using temporary ones"
+    crt, key = generate_cert_key_pair(server_name)
+  end
 end
 
 webrick_options = {
   :Port               => 2224,
-  :BindAddress        => nil,
+  :BindAddress        => '::',
+  :Host               => '::',
   :SSLEnable          => true,
   :SSLVerifyClient    => OpenSSL::SSL::VERIFY_NONE,
-  :SSLCertificate     => OpenSSL::X509::Certificate.new(File.open(CRT_FILE).read),
-  :SSLPrivateKey      => OpenSSL::PKey::RSA.new(File.open(KEY_FILE).read()),
+  :SSLCertificate     => OpenSSL::X509::Certificate.new(crt),
+  :SSLPrivateKey      => OpenSSL::PKey::RSA.new(key),
   :SSLCertName        => [[ "CN", server_name ]],
   :SSLOptions         => OpenSSL::SSL::OP_NO_SSLv2 | OpenSSL::SSL::OP_NO_SSLv3,
 }
 
-if is_systemctl
-  webrick_options[:StartCallback] = Proc.new {
-    `python /usr/lib/pcsd/systemd-notify-fix.py`
-  }
-end
-
 server = ::Rack::Handler::WEBrick
 trap(:INT) do
   puts "Shutting down (INT)"
-  server.shutdown
-  #exit
+  if server.instance_variable_get("@server")
+    server.shutdown
+  else
+    exit
+  end
 end
 
 trap(:TERM) do
   puts "Shutting down (TERM)"
-  server.shutdown
-  #exit
+  if server.instance_variable_get("@server")
+    server.shutdown
+  else
+    exit
+  end
 end
 
 require 'pcsd'
 begin
   server.run(Sinatra::Application, webrick_options)
+rescue Errno::EAFNOSUPPORT
+  webrick_options[:BindAddress] = '0.0.0.0'
+  webrick_options[:Host] = '0.0.0.0'
+  server.run(Sinatra::Application, webrick_options)
 end
diff --git a/pcsd/systemd-notify-fix.py b/pcsd/systemd-notify-fix.py
deleted file mode 100644
index 8f26918..0000000
--- a/pcsd/systemd-notify-fix.py
+++ /dev/null
@@ -1,16 +0,0 @@
-import socket
-import os
-import sys
-import time
-s = socket.socket(socket.AF_UNIX, socket.SOCK_DGRAM)
-e = os.getenv('NOTIFY_SOCKET')
-if not e:
-    sys.exit()
-if e.startswith('@'):
-  # abstract namespace socket
-  e = '\0%s' % e[1:]
-print e
-s.connect(e)
-s.sendall("READY=1")
-s.close()
-time.sleep(5)
diff --git a/pcsd/test/.gitignore b/pcsd/test/.gitignore
new file mode 100644
index 0000000..1944fd6
--- /dev/null
+++ b/pcsd/test/.gitignore
@@ -0,0 +1 @@
+*.tmp
diff --git a/pcsd/test/Makefile b/pcsd/test/Makefile
new file mode 100644
index 0000000..8682dfc
--- /dev/null
+++ b/pcsd/test/Makefile
@@ -0,0 +1,6 @@
+this_dir := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
+parent_dir := $(shell dirname $(this_dir))
+gem_home := $(parent_dir)/vendor/bundle/ruby
+
+test:
+	GEM_HOME=$(gem_home) ruby -I$(parent_dir) -I$(this_dir) test_all_suite.rb
diff --git a/pcsd/test/cib1.xml b/pcsd/test/cib1.xml
new file mode 100644
index 0000000..f603f24
--- /dev/null
+++ b/pcsd/test/cib1.xml
@@ -0,0 +1,401 @@
+<cib crm_feature_set="3.0.9" validate-with="pacemaker-2.2" epoch="114" num_updates="0" admin_epoch="0" cib-last-written="Sun Jul  5 12:47:19 2015" have-quorum="1" dc-uuid="3">
+  <configuration>
+    <crm_config>
+      <cluster_property_set id="cib-bootstrap-options">
+        <nvpair id="cib-bootstrap-options-have-watchdog" name="have-watchdog" value="false"/>
+        <nvpair id="cib-bootstrap-options-dc-version" name="dc-version" value="1.1.12-a9c8177"/>
+        <nvpair id="cib-bootstrap-options-cluster-infrastructure" name="cluster-infrastructure" value="corosync"/>
+        <nvpair id="cib-bootstrap-options-cluster-name" name="cluster-name" value="mycluster"/>
+        <nvpair id="cib-bootstrap-options-last-lrm-refresh" name="last-lrm-refresh" value="1435909423"/>
+      </cluster_property_set>
+    </crm_config>
+    <nodes>
+      <node id="1" uname="node1"/>
+      <node id="2" uname="node2"/>
+      <node id="3" uname="node3">
+        <instance_attributes id="nodes-3"/>
+      </node>
+    </nodes>
+    <resources>
+      <primitive class="stonith" id="node1-stonith" type="fence_xvm">
+        <instance_attributes id="node1-stonith-instance_attributes">
+          <nvpair id="node1-stonith-instance_attributes-domain" name="domain" value="node1"/>
+        </instance_attributes>
+        <operations>
+          <op id="node1-stonith-monitor-interval-60s" interval="60s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive class="stonith" id="node2-stonith" type="fence_xvm">
+        <instance_attributes id="node2-stonith-instance_attributes">
+          <nvpair id="node2-stonith-instance_attributes-domain" name="domain" value="node2"/>
+        </instance_attributes>
+        <operations>
+          <op id="node2-stonith-monitor-interval-60s" interval="60s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive class="stonith" id="node3-stonith" type="fence_xvm">
+        <instance_attributes id="node3-stonith-instance_attributes">
+          <nvpair id="node3-stonith-instance_attributes-domain" name="domain" value="node3"/>
+        </instance_attributes>
+        <operations>
+          <op id="node3-stonith-monitor-interval-60s" interval="60s" name="monitor"/>
+        </operations>
+      </primitive>
+      <primitive class="ocf" id="dummy1" provider="heartbeat" type="Dummy">
+        <instance_attributes id="dummy1-instance_attributes">
+          <nvpair id="dummy1-instance_attributes-fake" name="fake" value="--test"/>
+        </instance_attributes>
+        <utilization id="dummy1-utilization">
+          <nvpair id="dummy1-utilization-test_name" name="test_name" value="-10"/>
+          <nvpair id="dummy1-utilization-another_one" name="another_one" value="0"/>
+        </utilization>
+        <operations>
+          <op id="dummy1-start-interval-0s" interval="0s" name="start" timeout="20"/>
+          <op id="dummy1-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
+          <op id="dummy1-monitor-interval-10" interval="10" name="monitor" timeout="20"/>
+        </operations>
+        <meta_attributes id="dummy1-meta_attributes">
+          <nvpair id="dummy1-meta_attributes-testattr" name="testattr" value="0"/>
+          <nvpair id="dummy1-meta_attributes-attr2" name="attr2" value="10"/>
+        </meta_attributes>
+      </primitive>
+      <group id="group1">
+        <primitive class="ocf" id="dummy3" provider="heartbeat" type="Dummy">
+          <instance_attributes id="dummy3-instance_attributes"/>
+          <utilization id="dummy3-utilization"/>
+          <operations>
+            <op id="dummy3-start-interval-0s" interval="0s" name="start" timeout="20"/>
+            <op id="dummy3-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
+            <op id="dummy3-monitor-interval-10" interval="10" name="monitor" timeout="20"/>
+          </operations>
+          <meta_attributes id="dummy3-meta_attributes">
+            <nvpair id="dummy3-meta_attributes-aaa" name="aaa" value="111"/>
+            <nvpair id="dummy3-meta_attributes-b" name="b" value="3"/>
+          </meta_attributes>
+        </primitive>
+        <primitive class="ocf" id="dummy4" provider="heartbeat" type="Dummy">
+          <instance_attributes id="dummy4-instance_attributes"/>
+          <operations>
+            <op id="dummy4-start-interval-0s" interval="0s" name="start" timeout="20"/>
+            <op id="dummy4-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
+            <op id="dummy4-monitor-interval-10" interval="10" name="monitor" timeout="20"/>
+          </operations>
+          <meta_attributes id="dummy4-meta_attributes">
+            <nvpair id="dummy4-meta_attributes-aaa" name="aaa" value="222"/>
+            <nvpair id="dummy4-meta_attributes-b" name="b" value="4"/>
+          </meta_attributes>
+        </primitive>
+        <meta_attributes id="group1-meta_attributes">
+          <nvpair id="group1-meta_attributes-c" name="c" value="1"/>
+          <nvpair id="group1-meta_attributes-aaa" name="aaa" value="333"/>
+        </meta_attributes>
+      </group>
+      <clone id="dummy-clone">
+        <primitive class="ocf" id="dummy" provider="heartbeat" type="Dummy">
+          <instance_attributes id="dummy-instance_attributes"/>
+          <operations>
+            <op id="dummy-start-interval-0s" interval="0s" name="start" timeout="20"/>
+            <op id="dummy-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
+            <op id="dummy-monitor-interval-10" interval="10" name="monitor" timeout="20"/>
+          </operations>
+          <meta_attributes id="dummy-meta_attributes">
+            <nvpair id="dummy-meta_attributes-aaa" name="aaa" value="111"/>
+            <nvpair id="dummy-meta_attributes-bbb" name="bbb" value="111"/>
+          </meta_attributes>
+        </primitive>
+        <meta_attributes id="dummy-clone-meta">
+          <nvpair id="dummy-clone-meta_attributes-aaa" name="aaa" value="222"/>
+          <nvpair id="dummy-clone-meta_attributes-ccc" name="ccc" value="222"/>
+        </meta_attributes>
+      </clone>
+      <clone id="group2-clone">
+        <group id="group2">
+          <primitive class="ocf" id="dummy6" provider="heartbeat" type="Dummy">
+            <instance_attributes id="dummy6-instance_attributes"/>
+            <utilization id="dummy6-utilization">
+              <nvpair id="dummy6-utilization-util1" name="util1" value="8"/>
+            </utilization>
+            <operations>
+              <op id="dummy6-start-interval-0s" interval="0s" name="start" timeout="20"/>
+              <op id="dummy6-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
+              <op id="dummy6-monitor-interval-10" interval="10" name="monitor" timeout="20"/>
+            </operations>
+            <meta_attributes id="dummy6-meta_attributes">
+              <nvpair id="dummy6-meta_attributes-a" name="a" value="6"/>
+              <nvpair id="dummy6-meta_attributes-b" name="b" value="6"/>
+            </meta_attributes>
+          </primitive>
+          <primitive class="ocf" id="dummy5" provider="heartbeat" type="Dummy">
+            <instance_attributes id="dummy5-instance_attributes"/>
+            <operations>
+              <op id="dummy5-start-interval-0s" interval="0s" name="start" timeout="20"/>
+              <op id="dummy5-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
+              <op id="dummy5-monitor-interval-10" interval="10" name="monitor" timeout="20"/>
+            </operations>
+            <meta_attributes id="dummy5-meta_attributes">
+              <nvpair id="dummy5-meta_attributes-a" name="a" value="5"/>
+              <nvpair id="dummy5-meta_attributes-b" name="b" value="5"/>
+              <nvpair id="dummy5-meta_attributes-x" name="x" value="0"/>
+            </meta_attributes>
+          </primitive>
+          <meta_attributes id="group2-meta_attributes">
+            <nvpair id="group2-meta_attributes-a" name="a" value="2"/>
+            <nvpair id="group2-meta_attributes-c" name="c" value="2"/>
+            <nvpair id="group2-meta_attributes-d" name="d" value="2"/>
+          </meta_attributes>
+        </group>
+        <meta_attributes id="group2-clone-meta">
+          <nvpair id="group2-clone-meta_attributes-a" name="a" value="1"/>
+          <nvpair id="group2-clone-meta_attributes-d" name="d" value="1"/>
+        </meta_attributes>
+      </clone>
+      <master id="ms-master">
+        <primitive class="ocf" id="ms" provider="pacemaker" type="Stateful">
+          <instance_attributes id="ms-instance_attributes"/>
+          <operations>
+            <op id="ms-start-interval-0s" interval="0s" name="start" timeout="20"/>
+            <op id="ms-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
+            <op id="ms-monitor-interval-10" interval="10" name="monitor" role="Master" timeout="20"/>
+            <op id="ms-monitor-interval-11" interval="11" name="monitor" role="Slave" timeout="20"/>
+          </operations>
+          <meta_attributes id="ms-meta_attributes">
+            <nvpair id="ms-meta_attributes-a" name="a" value="0"/>
+            <nvpair id="ms-meta_attributes-c" name="c" value="0"/>
+          </meta_attributes>
+        </primitive>
+        <meta_attributes id="ms-master-meta_attributes">
+          <nvpair id="ms-master-meta_attributes-a" name="a" value="1"/>
+          <nvpair id="ms-master-meta_attributes-b" name="b" value="1"/>
+        </meta_attributes>
+      </master>
+      <master id="group3-master">
+        <group id="group3">
+          <primitive class="ocf" id="ms1" provider="pacemaker" type="Stateful">
+            <instance_attributes id="ms1-instance_attributes"/>
+            <operations>
+              <op id="ms1-start-interval-0s" interval="0s" name="start" timeout="20"/>
+              <op id="ms1-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
+              <op id="ms1-monitor-interval-10" interval="10" name="monitor" role="Master" timeout="20"/>
+              <op id="ms1-monitor-interval-11" interval="11" name="monitor" role="Slave" timeout="20"/>
+            </operations>
+            <meta_attributes id="ms1-meta_attributes">
+              <nvpair id="ms1-meta_attributes-a" name="a" value="1"/>
+              <nvpair id="ms1-meta_attributes-b" name="b" value="1"/>
+              <nvpair id="ms1-meta_attributes-d" name="d" value="1"/>
+            </meta_attributes>
+          </primitive>
+          <primitive class="ocf" id="ms2" provider="pacemaker" type="Stateful">
+            <instance_attributes id="ms2-instance_attributes"/>
+            <operations>
+              <op id="ms2-start-interval-0s" interval="0s" name="start" timeout="20"/>
+              <op id="ms2-stop-interval-0s" interval="0s" name="stop" timeout="20"/>
+              <op id="ms2-monitor-interval-10" interval="10" name="monitor" role="Master" timeout="20"/>
+              <op id="ms2-monitor-interval-11" interval="11" name="monitor" role="Slave" timeout="20"/>
+            </operations>
+            <meta_attributes id="ms2-meta_attributes">
+              <nvpair id="ms2-meta_attributes-a" name="a" value="2"/>
+              <nvpair id="ms2-meta_attributes-b" name="b" value="2"/>
+              <nvpair id="ms2-meta_attributes-d" name="d" value="2"/>
+            </meta_attributes>
+          </primitive>
+          <meta_attributes id="group3-meta_attributes">
+            <nvpair id="group3-meta_attributes-a" name="a" value="3"/>
+            <nvpair id="group3-meta_attributes-b" name="b" value="3"/>
+            <nvpair id="group3-meta_attributes-c" name="c" value="3"/>
+          </meta_attributes>
+        </group>
+        <meta_attributes id="group3-master-meta_attributes">
+          <nvpair id="group3-master-meta_attributes-a" name="a" value="0"/>
+          <nvpair id="group3-master-meta_attributes-c" name="c" value="0"/>
+        </meta_attributes>
+      </master>
+    </resources>
+    <constraints>
+      <rsc_location id="location-dummy1-node1-10" node="node1" rsc="dummy1" score="10"/>
+      <rsc_location id="location-dummy1-node2--INFINITY" node="node2" rsc="dummy1" score="-INFINITY"/>
+    </constraints>
+  </configuration>
+  <status>
+    <node_state id="3" uname="node3" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <transient_attributes id="3">
+        <instance_attributes id="status-3">
+          <nvpair id="status-3-shutdown" name="shutdown" value="0"/>
+          <nvpair id="status-3-probe_complete" name="probe_complete" value="true"/>
+          <nvpair id="status-3-last-failure-node3-stonith" name="last-failure-node3-stonith" value="1435908482"/>
+          <nvpair id="status-3-master-ms" name="master-ms" value="10"/>
+          <nvpair id="status-3-last-failure-node2-stonith" name="last-failure-node2-stonith" value="1435908509"/>
+          <nvpair id="status-3-last-failure-node1-stonith" name="last-failure-node1-stonith" value="1435908524"/>
+          <nvpair id="status-3-master-ms1" name="master-ms1" value="10"/>
+          <nvpair id="status-3-master-ms2" name="master-ms2" value="10"/>
+        </instance_attributes>
+      </transient_attributes>
+      <lrm id="3">
+        <lrm_resources>
+          <lrm_resource id="node1-stonith" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="node1-stonith_last_0" operation_key="node1-stonith_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="24:171:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;24:171:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="112" rc-code="0" op-status="0" interval="0" last-run="1436001980" last-rc-change="1436001980" exec-time="8" queue-time="0" op-digest="03b679c5fe58755e39524207146b5d79" on_node="node3"/>
+            <lrm_rsc_op id="node1-stonith_monitor_60000" operation_key="node1-stonith_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="16:172:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;16:172:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="114" rc-code="0" op-status="0" interval="60000" last-rc-change="1436001981" exec-time="10" queue-time="0" op-digest="d6b963bc9031cd423f30674cd501b826" on_node="node3"/>
+          </lrm_resource>
+          <lrm_resource id="node2-stonith" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="node2-stonith_last_0" operation_key="node2-stonith_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="31:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;31:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="121" rc-code="0" op-status="0" interval="0" last-run="1436002943" last-rc-change="1436002943" exec-time="0" queue-time="0" op-digest="1c2b2b347bd911cf64a7197c277a554e" on_node="node3"/>
+          </lrm_resource>
+          <lrm_resource id="dummy1" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy1_last_0" operation_key="dummy1_stop_0" operation="stop" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="36:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;36:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="123" rc-code="0" op-status="0" interval="0" last-run="1436002943" last-rc-change="1436002943" exec-time="21" queue-time="0" op-digest="07c70cdfaab292cf9afd6ca7c583b7ff" on_node="node3"/>
+          </lrm_resource>
+          <lrm_resource id="ms1" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ms1_last_0" operation_key="ms1_promote_0" operation="promote" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="98:28:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;98:28:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="104" rc-code="0" op-status="0" interval="0" last-run="1435912249" last-rc-change="1435912249" exec-time="57" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node3"/>
+            <lrm_rsc_op id="ms1_monitor_10000" operation_key="ms1_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="102:29:8:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:8;102:29:8:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="105" rc-code="8" op-status="0" interval="10000" last-rc-change="1435912249" exec-time="17" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node3"/>
+          </lrm_resource>
+          <lrm_resource id="dummy3" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy3_last_0" operation_key="dummy3_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="24:172:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;24:172:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="116" rc-code="0" op-status="0" interval="0" last-run="1436001981" last-rc-change="1436001981" exec-time="12" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node3" op-force-resta [...]
+            <lrm_rsc_op id="dummy3_monitor_10000" operation_key="dummy3_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="25:172:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;25:172:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="117" rc-code="0" op-status="0" interval="10000" last-rc-change="1436001981" exec-time="23" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node3"/>
+          </lrm_resource>
+          <lrm_resource id="dummy4" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy4_last_0" operation_key="dummy4_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="26:172:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;26:172:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="118" rc-code="0" op-status="0" interval="0" last-run="1436001981" last-rc-change="1436001981" exec-time="27" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node3" op-force-resta [...]
+            <lrm_rsc_op id="dummy4_monitor_10000" operation_key="dummy4_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="27:172:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;27:172:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="119" rc-code="0" op-status="0" interval="10000" last-rc-change="1436001981" exec-time="17" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node3"/>
+          </lrm_resource>
+          <lrm_resource id="dummy5" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy5_last_0" operation_key="dummy5_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="37:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;37:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="55" rc-code="0" op-status="0" interval="0" last-run="1435908482" last-rc-change="1435908482" exec-time="11" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node3" op-force-restart="  [...]
+            <lrm_rsc_op id="dummy5_monitor_10000" operation_key="dummy5_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="38:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;38:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="56" rc-code="0" op-status="0" interval="10000" last-rc-change="1435908482" exec-time="22" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node3"/>
+          </lrm_resource>
+          <lrm_resource id="dummy6" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy6_last_0" operation_key="dummy6_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="35:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;35:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="52" rc-code="0" op-status="0" interval="0" last-run="1435908482" last-rc-change="1435908482" exec-time="15" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node3" op-force-restart="  [...]
+            <lrm_rsc_op id="dummy6_monitor_10000" operation_key="dummy6_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="36:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;36:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="54" rc-code="0" op-status="0" interval="10000" last-rc-change="1435908482" exec-time="14" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node3"/>
+          </lrm_resource>
+          <lrm_resource id="node3-stonith" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="node3-stonith_last_0" operation_key="node3-stonith_stop_0" operation="stop" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="40:6:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;40:6:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="125" rc-code="0" op-status="0" interval="0" last-run="1436002948" last-rc-change="1436002948" exec-time="1" queue-time="0" op-digest="0dfd0307cf223a454f67b2dde2575d1d" on_node="node3"/>
+            <lrm_rsc_op id="node3-stonith_monitor_60000" operation_key="node3-stonith_monitor_60000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="31:17:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;31:17:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="89" rc-code="0" op-status="0" interval="60000" last-rc-change="1435909423" exec-time="7" queue-time="0" op-digest="1cc31e1e2de01cec150b6f4adfafb5c5" on_node="node3"/>
+          </lrm_resource>
+          <lrm_resource id="ms" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ms_last_0" operation_key="ms_promote_0" operation="promote" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="76:9:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;76:9:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="63" rc-code="0" op-status="0" interval="0" last-run="1435908524" last-rc-change="1435908524" exec-time="94" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node3"/>
+            <lrm_rsc_op id="ms_monitor_10000" operation_key="ms_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="72:11:8:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:8;72:11:8:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="67" rc-code="8" op-status="0" interval="10000" last-rc-change="1435908545" exec-time="21" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node3"/>
+          </lrm_resource>
+          <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="25:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;25:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="49" rc-code="0" op-status="0" interval="0" last-run="1435908482" last-rc-change="1435908482" exec-time="21" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node3" op-force-restart=" st [...]
+            <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="26:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;26:5:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="51" rc-code="0" op-status="0" interval="10000" last-rc-change="1435908482" exec-time="18" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node3"/>
+          </lrm_resource>
+          <lrm_resource id="ms2" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ms2_last_0" operation_key="ms2_promote_0" operation="promote" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="105:29:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:0;105:29:0:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="106" rc-code="0" op-status="0" interval="0" last-run="1435912249" last-rc-change="1435912249" exec-time="50" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node3"/>
+            <lrm_rsc_op id="ms2_monitor_10000" operation_key="ms2_monitor_10000" operation="monitor" crm-debug-origin="build_active_RAs" crm_feature_set="3.0.9" transition-key="107:30:8:f4172d03-c62e-42f6-8bf1-bef588fdb12d" transition-magic="0:8;107:30:8:f4172d03-c62e-42f6-8bf1-bef588fdb12d" call-id="107" rc-code="8" op-status="0" interval="10000" last-rc-change="1435912249" exec-time="10" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node3"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="1" uname="node1" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <transient_attributes id="1">
+        <instance_attributes id="status-1">
+          <nvpair id="status-1-shutdown" name="shutdown" value="0"/>
+          <nvpair id="status-1-probe_complete" name="probe_complete" value="true"/>
+          <nvpair id="status-1-master-ms" name="master-ms" value="5"/>
+          <nvpair id="status-1-master-ms1" name="master-ms1" value="5"/>
+          <nvpair id="status-1-master-ms2" name="master-ms2" value="5"/>
+        </instance_attributes>
+      </transient_attributes>
+      <lrm id="1">
+        <lrm_resources>
+          <lrm_resource id="node1-stonith" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="node1-stonith_last_0" operation_key="node1-stonith_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="16:2:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:7;16:2:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1436002943" last-rc-change="1436002943" exec-time="1" queue-time="0" op-digest="03b679c5fe58755e39524207146b5d79" on_node="node1"/>
+          </lrm_resource>
+          <lrm_resource id="node2-stonith" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="node2-stonith_last_0" operation_key="node2-stonith_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="17:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;17:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="58" rc-code="0" op-status="0" interval="0" last-run="1436002944" last-rc-change="1436002944" exec-time="13" queue-time="0" op-digest="1c2b2b347bd911cf64a7197c277a554e" on_node="node1"/>
+            <lrm_rsc_op id="node2-stonith_monitor_60000" operation_key="node2-stonith_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="18:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;18:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="60" rc-code="0" op-status="0" interval="60000" last-rc-change="1436002944" exec-time="10" queue-time="0" op-digest="2ed73b76d29dfd28c764e516ffda6432" on_node="node1"/>
+          </lrm_resource>
+          <lrm_resource id="node3-stonith" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="node3-stonith_last_0" operation_key="node3-stonith_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="18:2:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:7;18:2:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="13" rc-code="7" op-status="0" interval="0" last-run="1436002944" last-rc-change="1436002944" exec-time="0" queue-time="0" op-digest="0dfd0307cf223a454f67b2dde2575d1d" on_node="node1"/>
+          </lrm_resource>
+          <lrm_resource id="dummy1" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy1_last_0" operation_key="dummy1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="35:37:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;35:37:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="81" rc-code="0" op-status="0" interval="0" last-run="1436020250" last-rc-change="1436020250" exec-time="24" queue-time="0" op-digest="16d989b809c6743cad46d0d12b8a9262" on_node="node1" op-force-restar [...]
+            <lrm_rsc_op id="dummy1_monitor_10000" operation_key="dummy1_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="36:37:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;36:37:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="82" rc-code="0" op-status="0" interval="10000" last-rc-change="1436020250" exec-time="21" queue-time="0" op-digest="c94db5a1993f190ecfd975fd8fe499b3" on_node="node1"/>
+          </lrm_resource>
+          <lrm_resource id="dummy3" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy3_last_0" operation_key="dummy3_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="20:2:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:7;20:2:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="21" rc-code="7" op-status="0" interval="0" last-run="1436002944" last-rc-change="1436002944" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node1" op-force-rest [...]
+          </lrm_resource>
+          <lrm_resource id="dummy4" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy4_last_0" operation_key="dummy4_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="21:2:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:7;21:2:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="25" rc-code="7" op-status="0" interval="0" last-run="1436002944" last-rc-change="1436002944" exec-time="14" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node1" op-force-rest [...]
+          </lrm_resource>
+          <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="49:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;49:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="56" rc-code="0" op-status="0" interval="0" last-run="1436002944" last-rc-change="1436002944" exec-time="27" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node1" op-force-restart="  [...]
+            <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="35:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;35:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="62" rc-code="0" op-status="0" interval="10000" last-rc-change="1436002944" exec-time="20" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node1"/>
+          </lrm_resource>
+          <lrm_resource id="dummy6" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy6_last_0" operation_key="dummy6_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="63:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;63:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="57" rc-code="0" op-status="0" interval="0" last-run="1436002944" last-rc-change="1436002944" exec-time="29" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node1" op-force-restart= [...]
+            <lrm_rsc_op id="dummy6_monitor_10000" operation_key="dummy6_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="50:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;50:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="63" rc-code="0" op-status="0" interval="10000" last-rc-change="1436002945" exec-time="17" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node1"/>
+          </lrm_resource>
+          <lrm_resource id="dummy5" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy5_last_0" operation_key="dummy5_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="51:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;51:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="64" rc-code="0" op-status="0" interval="0" last-run="1436002945" last-rc-change="1436002945" exec-time="22" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node1" op-force-restart= [...]
+            <lrm_rsc_op id="dummy5_monitor_10000" operation_key="dummy5_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="52:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;52:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="66" rc-code="0" op-status="0" interval="10000" last-rc-change="1436002945" exec-time="11" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node1"/>
+          </lrm_resource>
+          <lrm_resource id="ms" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ms_last_0" operation_key="ms_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="69:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;69:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="65" rc-code="0" op-status="0" interval="0" last-run="1436002945" last-rc-change="1436002945" exec-time="83" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node1"/>
+            <lrm_rsc_op id="ms_monitor_11000" operation_key="ms_monitor_11000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="70:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;70:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="68" rc-code="0" op-status="0" interval="11000" last-rc-change="1436002945" exec-time="43" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node1"/>
+          </lrm_resource>
+          <lrm_resource id="ms1" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ms1_last_0" operation_key="ms1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="95:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;95:3:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="67" rc-code="0" op-status="0" interval="0" last-run="1436002945" last-rc-change="1436002945" exec-time="110" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node1"/>
+            <lrm_rsc_op id="ms1_monitor_11000" operation_key="ms1_monitor_11000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="101:4:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;101:4:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="69" rc-code="0" op-status="0" interval="11000" last-rc-change="1436002945" exec-time="23" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node1"/>
+          </lrm_resource>
+          <lrm_resource id="ms2" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ms2_last_0" operation_key="ms2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="102:4:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;102:4:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="70" rc-code="0" op-status="0" interval="0" last-run="1436002945" last-rc-change="1436002945" exec-time="61" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node1"/>
+            <lrm_rsc_op id="ms2_monitor_11000" operation_key="ms2_monitor_11000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="103:4:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;103:4:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="71" rc-code="0" op-status="0" interval="11000" last-rc-change="1436002945" exec-time="10" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node1"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+    <node_state id="2" uname="node2" in_ccm="true" crmd="online" crm-debug-origin="do_update_resource" join="member" expected="member">
+      <transient_attributes id="2">
+        <instance_attributes id="status-2">
+          <nvpair id="status-2-shutdown" name="shutdown" value="0"/>
+          <nvpair id="status-2-probe_complete" name="probe_complete" value="true"/>
+          <nvpair id="status-2-master-ms" name="master-ms" value="5"/>
+          <nvpair id="status-2-master-ms1" name="master-ms1" value="5"/>
+          <nvpair id="status-2-master-ms2" name="master-ms2" value="5"/>
+        </instance_attributes>
+      </transient_attributes>
+      <lrm id="2">
+        <lrm_resources>
+          <lrm_resource id="node1-stonith" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="node1-stonith_last_0" operation_key="node1-stonith_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="23:6:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:7;23:6:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="5" rc-code="7" op-status="0" interval="0" last-run="1436002948" last-rc-change="1436002948" exec-time="1" queue-time="0" op-digest="03b679c5fe58755e39524207146b5d79" on_node="node2"/>
+          </lrm_resource>
+          <lrm_resource id="node2-stonith" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="node2-stonith_last_0" operation_key="node2-stonith_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="24:6:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:7;24:6:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="9" rc-code="7" op-status="0" interval="0" last-run="1436002949" last-rc-change="1436002949" exec-time="0" queue-time="0" op-digest="1c2b2b347bd911cf64a7197c277a554e" on_node="node2"/>
+          </lrm_resource>
+          <lrm_resource id="node3-stonith" type="fence_xvm" class="stonith">
+            <lrm_rsc_op id="node3-stonith_last_0" operation_key="node3-stonith_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="27:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;27:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="58" rc-code="0" op-status="0" interval="0" last-run="1436002949" last-rc-change="1436002949" exec-time="31" queue-time="0" op-digest="0dfd0307cf223a454f67b2dde2575d1d" on_node="node2"/>
+            <lrm_rsc_op id="node3-stonith_monitor_60000" operation_key="node3-stonith_monitor_60000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="28:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;28:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="60" rc-code="0" op-status="0" interval="60000" last-rc-change="1436002949" exec-time="18" queue-time="0" op-digest="1cc31e1e2de01cec150b6f4adfafb5c5" on_node="node2"/>
+          </lrm_resource>
+          <lrm_resource id="dummy1" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy1_last_0" operation_key="dummy1_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="26:6:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:7;26:6:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="17" rc-code="7" op-status="0" interval="0" last-run="1436002949" last-rc-change="1436002949" exec-time="17" queue-time="0" op-digest="07c70cdfaab292cf9afd6ca7c583b7ff" on_node="node2" op-force-rest [...]
+          </lrm_resource>
+          <lrm_resource id="dummy3" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy3_last_0" operation_key="dummy3_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="27:6:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:7;27:6:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="21" rc-code="7" op-status="0" interval="0" last-run="1436002949" last-rc-change="1436002949" exec-time="18" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node2" op-force-rest [...]
+          </lrm_resource>
+          <lrm_resource id="dummy4" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy4_last_0" operation_key="dummy4_monitor_0" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="28:6:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:7;28:6:7:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="25" rc-code="7" op-status="0" interval="0" last-run="1436002949" last-rc-change="1436002949" exec-time="23" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node2" op-force-rest [...]
+          </lrm_resource>
+          <lrm_resource id="dummy" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy_last_0" operation_key="dummy_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="57:6:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;57:6:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="56" rc-code="0" op-status="0" interval="0" last-run="1436002949" last-rc-change="1436002949" exec-time="55" queue-time="1" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node2" op-force-restart="  [...]
+            <lrm_rsc_op id="dummy_monitor_10000" operation_key="dummy_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="45:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;45:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="59" rc-code="0" op-status="0" interval="10000" last-rc-change="1436002949" exec-time="53" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node2"/>
+          </lrm_resource>
+          <lrm_resource id="dummy6" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy6_last_0" operation_key="dummy6_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="79:6:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;79:6:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="57" rc-code="0" op-status="0" interval="0" last-run="1436002949" last-rc-change="1436002949" exec-time="53" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node2" op-force-restart= [...]
+            <lrm_rsc_op id="dummy6_monitor_10000" operation_key="dummy6_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="68:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;68:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="61" rc-code="0" op-status="0" interval="10000" last-rc-change="1436002949" exec-time="31" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node2"/>
+          </lrm_resource>
+          <lrm_resource id="dummy5" type="Dummy" class="ocf" provider="heartbeat">
+            <lrm_rsc_op id="dummy5_last_0" operation_key="dummy5_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="69:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;69:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="62" rc-code="0" op-status="0" interval="0" last-run="1436002949" last-rc-change="1436002949" exec-time="34" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node2" op-force-restart= [...]
+            <lrm_rsc_op id="dummy5_monitor_10000" operation_key="dummy5_monitor_10000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="70:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;70:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="64" rc-code="0" op-status="0" interval="10000" last-rc-change="1436002949" exec-time="20" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node2"/>
+          </lrm_resource>
+          <lrm_resource id="ms" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ms_last_0" operation_key="ms_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="85:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;85:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="63" rc-code="0" op-status="0" interval="0" last-run="1436002949" last-rc-change="1436002949" exec-time="115" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node2"/>
+            <lrm_rsc_op id="ms_monitor_11000" operation_key="ms_monitor_11000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="89:8:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;89:8:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="66" rc-code="0" op-status="0" interval="11000" last-rc-change="1436002949" exec-time="38" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node2"/>
+          </lrm_resource>
+          <lrm_resource id="ms1" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ms1_last_0" operation_key="ms1_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="123:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;123:7:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="65" rc-code="0" op-status="0" interval="0" last-run="1436002949" last-rc-change="1436002949" exec-time="115" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node2"/>
+            <lrm_rsc_op id="ms1_monitor_11000" operation_key="ms1_monitor_11000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="128:8:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;128:8:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="67" rc-code="0" op-status="0" interval="11000" last-rc-change="1436002949" exec-time="46" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node2"/>
+          </lrm_resource>
+          <lrm_resource id="ms2" type="Stateful" class="ocf" provider="pacemaker">
+            <lrm_rsc_op id="ms2_last_0" operation_key="ms2_start_0" operation="start" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="129:8:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;129:8:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="68" rc-code="0" op-status="0" interval="0" last-run="1436002949" last-rc-change="1436002949" exec-time="62" queue-time="0" op-digest="f2317cad3d54cec5d7d7aa7d0bf35cf8" on_node="node2"/>
+            <lrm_rsc_op id="ms2_monitor_11000" operation_key="ms2_monitor_11000" operation="monitor" crm-debug-origin="do_update_resource" crm_feature_set="3.0.9" transition-key="130:8:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" transition-magic="0:0;130:8:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6" call-id="69" rc-code="0" op-status="0" interval="11000" last-rc-change="1436002949" exec-time="21" queue-time="0" op-digest="4811cef7f7f94e3a35a70be7916cb2fd" on_node="node2"/>
+          </lrm_resource>
+        </lrm_resources>
+      </lrm>
+    </node_state>
+  </status>
+</cib>
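
The status section above records, for every node, an lrm history of the
operations pacemaker ran against each resource (lrm_rsc_op entries with
their rc-code, interval and timestamps). As a rough illustration of how
such a fixture can be consumed from Ruby (the helper and its name are
assumptions, not part of pcsd), the per-node resource activity can be
pulled out with REXML:

    require 'rexml/document'

    # List, per node, the resources whose history contains a successful
    # (rc-code 0) start or promote operation.
    def started_resources(cib_xml)
      started = Hash.new { |hash, key| hash[key] = [] }
      doc = REXML::Document.new(cib_xml)
      doc.elements.each('cib/status/node_state') do |state|
        node = state.attributes['uname']
        state.elements.each('lrm/lrm_resources/lrm_resource') do |rsc|
          ok = rsc.elements.to_a('lrm_rsc_op').any? do |op|
            ['start', 'promote'].include?(op.attributes['operation']) &&
              op.attributes['rc-code'] == '0'
          end
          started[node] << rsc.attributes['id'] if ok
        end
      end
      started
    end

Fed the fixture above, this yields one entry per node (node1, node2,
node3), each listing the stonith, Dummy and Stateful resources that node
reported as started or promoted.
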
diff --git a/pcsd/test/cluster.conf b/pcsd/test/cluster.conf
new file mode 100644
index 0000000..1988971
--- /dev/null
+++ b/pcsd/test/cluster.conf
@@ -0,0 +1,27 @@
+<cluster config_version="9" name="test99">
+  <fence_daemon/>
+  <clusternodes>
+    <clusternode name="rh7-1" nodeid="1">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-1"/>
+        </method>
+      </fence>
+    </clusternode>
+    <clusternode name="rh7-2" nodeid="2">
+      <fence>
+        <method name="pcmk-method">
+          <device name="pcmk-redirect" port="rh7-2"/>
+        </method>
+      </fence>
+    </clusternode>
+  </clusternodes>
+  <cman broadcast="no" expected_votes="1" transport="udpu" two_node="1"/>
+  <fencedevices>
+    <fencedevice agent="fence_pcmk" name="pcmk-redirect"/>
+  </fencedevices>
+  <rm>
+    <failoverdomains/>
+    <resources/>
+  </rm>
+</cluster>
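
The cluster.conf fixture above keeps its version in the config_version
attribute of the root <cluster> element; the Cfgsync::ClusterConf tests
further down in this patch expect that value (9 here) to be reported as
the file's version. A minimal sketch of reading it, as an assumed helper
rather than the pcsd implementation:

    require 'rexml/document'

    # Return the config_version attribute of a cluster.conf document.
    def cluster_conf_version(text)
      REXML::Document.new(text).root.attributes['config_version'].to_i
    end

    # cluster_conf_version(File.read('pcsd/test/cluster.conf'))  # => 9
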
diff --git a/pcsd/test/corosync.conf b/pcsd/test/corosync.conf
new file mode 100644
index 0000000..08d50b1
--- /dev/null
+++ b/pcsd/test/corosync.conf
@@ -0,0 +1,27 @@
+totem {
+    version: 2
+    secauth: off
+    cluster_name: test99
+    transport: udpu
+    config_version: 9
+}
+
+nodelist {
+    node {
+        ring0_addr: rh7-1
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: rh7-2
+        nodeid: 2
+    }
+}
+
+quorum {
+    provider: corosync_votequorum
+}
+
+logging {
+    to_syslog: yes
+}
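
corosync.conf carries its version as the config_version key inside the
totem section; the TestCorosyncConf version cases later in this patch
treat a missing or non-numeric value as 0 and let the last occurrence
win. A rough sketch of that behaviour (an assumption for illustration,
not the actual Cfgsync code):

    # Extract the version of a corosync.conf text.
    def corosync_conf_version(text)
      found = text.scan(/^\s*config_version\s*:\s*(\S+)/).flatten
      # 'foo'.to_i == 0 and '1foo'.to_i == 1, matching the tests below
      found.empty? ? 0 : found.last.to_i
    end

    # corosync_conf_version(File.read('pcsd/test/corosync.conf'))  # => 9
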
diff --git a/pcsd/test/crm1.xml b/pcsd/test/crm1.xml
new file mode 100644
index 0000000..d612301
--- /dev/null
+++ b/pcsd/test/crm1.xml
@@ -0,0 +1,112 @@
+<?xml version="1.0"?>
+<crm_mon version="1.1.12">
+    <summary>
+        <last_update time="Sun Jul  5 12:52:15 2015" />
+        <last_change time="Sun Jul  5 12:47:19 2015" user="" client="" origin="" />
+        <stack type="corosync" />
+        <current_dc present="true" version="1.1.12-a9c8177" name="node3" id="3" with_quorum="true" />
+        <nodes_configured number="3" expected_votes="unknown" />
+        <resources_configured number="24" />
+    </summary>
+    <nodes>
+        <node name="node1" id="1" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="8" type="member" />
+        <node name="node2" id="2" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="false" resources_running="7" type="member" />
+        <node name="node3" id="3" online="true" standby="false" standby_onfail="false" maintenance="false" pending="false" unclean="false" shutdown="false" expected_up="true" is_dc="true" resources_running="9" type="member" />
+    </nodes>
+    <resources>
+        <resource id="node1-stonith" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+            <node name="node3" id="3" cached="false"/>
+        </resource>
+        <resource id="node2-stonith" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+            <node name="node1" id="1" cached="false"/>
+        </resource>
+        <resource id="node3-stonith" resource_agent="stonith:fence_xvm" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+            <node name="node2" id="2" cached="false"/>
+        </resource>
+        <resource id="dummy1" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+            <node name="node1" id="1" cached="false"/>
+        </resource>
+        <group id="group1" number_resources="2" >
+             <resource id="dummy3" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                 <node name="node3" id="3" cached="false"/>
+             </resource>
+             <resource id="dummy4" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                 <node name="node3" id="3" cached="false"/>
+             </resource>
+        </group>
+        <clone id="dummy-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false" >
+            <resource id="dummy" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                <node name="node3" id="3" cached="false"/>
+            </resource>
+            <resource id="dummy" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                <node name="node1" id="1" cached="false"/>
+            </resource>
+            <resource id="dummy" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                <node name="node2" id="2" cached="false"/>
+            </resource>
+        </clone>
+        <clone id="group2-clone" multi_state="false" unique="false" managed="true" failed="false" failure_ignored="false" >
+            <group id="group2:0" number_resources="2" >
+                 <resource id="dummy6" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node3" id="3" cached="false"/>
+                 </resource>
+                 <resource id="dummy5" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node3" id="3" cached="false"/>
+                 </resource>
+            </group>
+            <group id="group2:1" number_resources="2" >
+                 <resource id="dummy6" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node1" id="1" cached="false"/>
+                 </resource>
+                 <resource id="dummy5" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node1" id="1" cached="false"/>
+                 </resource>
+            </group>
+            <group id="group2:2" number_resources="2" >
+                 <resource id="dummy6" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node2" id="2" cached="false"/>
+                 </resource>
+                 <resource id="dummy5" resource_agent="ocf::heartbeat:Dummy" role="Started" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node2" id="2" cached="false"/>
+                 </resource>
+            </group>
+        </clone>
+        <clone id="ms-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false" >
+            <resource id="ms" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                <node name="node3" id="3" cached="false"/>
+            </resource>
+            <resource id="ms" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                <node name="node1" id="1" cached="false"/>
+            </resource>
+            <resource id="ms" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                <node name="node2" id="2" cached="false"/>
+            </resource>
+        </clone>
+        <clone id="group3-master" multi_state="true" unique="false" managed="true" failed="false" failure_ignored="false" >
+            <group id="group3:0" number_resources="2" >
+                 <resource id="ms1" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node3" id="3" cached="false"/>
+                 </resource>
+                 <resource id="ms2" resource_agent="ocf::pacemaker:Stateful" role="Master" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node3" id="3" cached="false"/>
+                 </resource>
+            </group>
+            <group id="group3:1" number_resources="2" >
+                 <resource id="ms1" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node1" id="1" cached="false"/>
+                 </resource>
+                 <resource id="ms2" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node1" id="1" cached="false"/>
+                 </resource>
+            </group>
+            <group id="group3:2" number_resources="2" >
+                 <resource id="ms1" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node2" id="2" cached="false"/>
+                 </resource>
+                 <resource id="ms2" resource_agent="ocf::pacemaker:Stateful" role="Slave" active="true" orphaned="false" managed="true" failed="false" failure_ignored="false" nodes_running_on="1" >
+                     <node name="node2" id="2" cached="false"/>
+                 </resource>
+            </group>
+        </clone>
+    </resources>
+</crm_mon>
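
crm1.xml is a crm_mon XML snapshot of the same three-node cluster: plain
resources, the group1 group, the dummy-clone and group2-clone clones and
the ms-master/group3-master multi-state clones, each listing the node
the instance runs on. As an illustration only (the helper name is an
assumption), the per-node instance counts can be derived with REXML and
checked against the resources_running attributes in the <nodes> section
(8, 7 and 9):

    require 'rexml/document'

    # Count active resource instances per node in a crm_mon XML snapshot.
    def active_per_node(crm_mon_xml)
      counts = Hash.new(0)
      doc = REXML::Document.new(crm_mon_xml)
      REXML::XPath.each(doc, '//resource[@active="true"]/node') do |node|
        counts[node.attributes['name']] += 1
      end
      counts
    end
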
diff --git a/pcsd/test/pcs_settings.conf b/pcsd/test/pcs_settings.conf
new file mode 100644
index 0000000..3834f21
--- /dev/null
+++ b/pcsd/test/pcs_settings.conf
@@ -0,0 +1,21 @@
+{
+  "format_version": 2,
+  "data_version": 9,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1",
+        "rh71-node2"
+      ]
+    },
+    {
+      "name": "cluster67",
+      "nodes": [
+        "rh67-node1",
+        "rh67-node2",
+        "rh67-node3"
+      ]
+    }
+  ]
+}
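
pcs_settings.conf is the JSON document pcsd synchronises between nodes
to remember known clusters; its data_version is the counter the
Cfgsync::PcsdSettings tests below compare. A minimal sketch (assumed
helper, not pcsd code) for mapping cluster names to their node lists:

    require 'json'

    # Map cluster names to node lists from a pcs_settings.conf document.
    def clusters_from_settings(text)
      Hash[JSON.parse(text)['clusters'].map { |c| [c['name'], c['nodes']] }]
    end

    # clusters_from_settings(File.read('pcsd/test/pcs_settings.conf'))
    #   => {"cluster71"=>["rh71-node1", "rh71-node2"],
    #       "cluster67"=>["rh67-node1", "rh67-node2", "rh67-node3"]}
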
diff --git a/pcsd/test/pcsd_test_utils.rb b/pcsd/test/pcsd_test_utils.rb
new file mode 100644
index 0000000..98e3336
--- /dev/null
+++ b/pcsd/test/pcsd_test_utils.rb
@@ -0,0 +1,27 @@
+CURRENT_DIR = File.expand_path(File.dirname(__FILE__))
+CFG_COROSYNC_CONF = File.join(CURRENT_DIR, "corosync.conf.tmp")
+CFG_CLUSTER_CONF = File.join(CURRENT_DIR, "cluster.conf.tmp")
+CFG_PCSD_SETTINGS = File.join(CURRENT_DIR, "pcs_settings.conf.tmp")
+CFG_PCSD_USERS = File.join(CURRENT_DIR, "pcs_users.conf.tmp")
+CFG_PCSD_TOKENS = File.join(CURRENT_DIR, 'tokens.tmp')
+
+CFG_SYNC_CONTROL = File.join(CURRENT_DIR, 'cfgsync_ctl.tmp')
+
+class MockLogger
+  attr_reader :log
+
+  def initialize
+    @log = []
+  end
+
+  def clean
+    @log = []
+  end
+
+  ['fatal', 'error', 'warn', 'info', 'debug'].each { |level|
+    define_method(level) { |message|
+      @log << [level, message]
+      return self
+    }
+  }
+end
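
MockLogger uses define_method to generate one method per log level
(fatal, error, warn, info, debug); each call appends a [level, message]
pair to @log and returns the logger, so tests can assert on exactly what
was logged. For example (illustrative usage only):

    logger = MockLogger.new
    logger.info('reading config')
    logger.error('sync failed')
    logger.log    # => [["info", "reading config"], ["error", "sync failed"]]
    logger.clean  # discards the recorded entries
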
diff --git a/pcsd/test/test_all_suite.rb b/pcsd/test/test_all_suite.rb
new file mode 100644
index 0000000..bc02ac8
--- /dev/null
+++ b/pcsd/test/test_all_suite.rb
@@ -0,0 +1,11 @@
+require 'rubygems'
+require 'test/unit'
+
+require 'test_corosyncconf.rb'
+require 'test_cluster.rb'
+require 'test_cluster_entity.rb'
+require 'test_auth.rb'
+require 'test_permissions.rb'
+require 'test_config.rb'
+require 'test_cfgsync.rb'
+require 'test_pcs.rb'
diff --git a/pcsd/test/test_auth.rb b/pcsd/test/test_auth.rb
new file mode 100644
index 0000000..0e9b1c0
--- /dev/null
+++ b/pcsd/test/test_auth.rb
@@ -0,0 +1,95 @@
+require 'test/unit'
+
+require 'pcsd_test_utils.rb'
+require 'auth.rb'
+
+class TestAuth < Test::Unit::TestCase
+
+  class ::PCSAuth
+    def self.getUsersGroups(username)
+      groups = {
+        'user1' => ['group1', 'haclient'],
+        'user2' => ['group2'],
+      }
+      if groups.key?(username)
+        return true, groups[username]
+      else
+        return false, []
+      end
+    end
+  end
+
+  def setup
+    $user_pass_file = CFG_PCSD_USERS
+    $logger = MockLogger.new
+  end
+
+  def testLoginByToken
+    users = []
+    users << {"username" => "user1", "token" => "token1"}
+    users << {"username" => "user2", "token" => "token2"}
+    users << {"username" => SUPERUSER, "token" => "tokenS"}
+    password_file = File.open($user_pass_file, File::RDWR|File::CREAT)
+    password_file.truncate(0)
+    password_file.rewind
+    password_file.write(JSON.pretty_generate(users))
+    password_file.close()
+
+    session = {}
+    cookies = {}
+    result = PCSAuth.loginByToken(session, cookies)
+    assert_equal(false, result)
+    assert_equal({}, session)
+
+    session = {}
+    cookies = {'token' => 'tokenX'}
+    result = PCSAuth.loginByToken(session, cookies)
+    assert_equal(false, result)
+    assert_equal({}, session)
+
+    session = {}
+    cookies = {'token' => 'token1'}
+    result = PCSAuth.loginByToken(session, cookies)
+    assert_equal(true, result)
+    assert_equal(
+      {:username => 'user1', :usergroups => ['group1', 'haclient']},
+      session
+    )
+
+    session = {}
+    cookies = {
+      'token' => 'token1',
+      'CIB_user' => 'userX',
+      'CIB_user_groups' => PCSAuth.cookieUserEncode('groupX')
+    }
+    result = PCSAuth.loginByToken(session, cookies)
+    assert_equal(true, result)
+    assert_equal(
+      {:username => 'user1', :usergroups => ['group1', 'haclient']},
+      session
+    )
+
+    session = {}
+    cookies = {'token' => 'tokenS'}
+    result = PCSAuth.loginByToken(session, cookies)
+    assert_equal(true, result)
+    assert_equal(
+      {:username => SUPERUSER, :usergroups => []},
+      session
+    )
+
+    session = {}
+    cookies = {
+      'token' => 'tokenS',
+      'CIB_user' => 'userX',
+      'CIB_user_groups' => PCSAuth.cookieUserEncode('groupX')
+    }
+    result = PCSAuth.loginByToken(session, cookies)
+    assert_equal(true, result)
+    assert_equal(
+      {:username => 'userX', :usergroups => ['groupX']},
+      session
+    )
+  end
+
+end
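
The cfgsync tests that follow exercise how synced configs are ordered:
configs with equal version and text compare equal, a higher
config_version or data_version always wins, and when the versions match
the hex digest of the text breaks the tie, which is why the max of a
shuffled list is deterministic. A simplified sketch of that ordering, as
an assumption for illustration rather than the Cfgsync implementation:

    require 'digest/sha1'

    # Order configs by version first, then by a digest of their text.
    class VersionedConfig
      include Comparable
      attr_reader :text, :version

      def initialize(text, version)
        @text = text
        @version = version
      end

      def digest
        Digest::SHA1.hexdigest(@text)
      end

      def <=>(other)
        [version, digest] <=> [other.version, other.digest]
      end
    end
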
diff --git a/pcsd/test/test_cfgsync.rb b/pcsd/test/test_cfgsync.rb
new file mode 100644
index 0000000..2be42da
--- /dev/null
+++ b/pcsd/test/test_cfgsync.rb
@@ -0,0 +1,917 @@
+require 'test/unit'
+require 'fileutils'
+require 'thread'
+
+require 'pcsd_test_utils.rb'
+require 'cfgsync.rb'
+
+
+class TestCfgsync < Test::Unit::TestCase
+  def test_compare_version()
+    cfg1 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="1" name="test1"/>'
+    )
+    cfg2 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="1" name="test1"/>'
+    )
+    cfg3 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="2" name="test1"/>'
+    )
+    cfg4 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="2" name="test2"/>'
+    )
+
+    assert(cfg1 == cfg2)
+    assert(cfg1 < cfg3)
+    assert(cfg1 < cfg4)
+    assert(cfg3 > cfg1)
+    assert_equal("0ebab34c8034fd1cb268d1170de935a183d156cf", cfg3.hash)
+    assert_equal("0f22e8a496ae00815d8bcbf005fd7b645ba9f617", cfg4.hash)
+    assert(cfg3 < cfg4)
+
+    newest = [cfg1, cfg2, cfg3, cfg4].shuffle!.max
+    assert_equal(2, newest.version)
+    assert_equal('0f22e8a496ae00815d8bcbf005fd7b645ba9f617', newest.hash)
+  end
+end
+
+
+class TestClusterConf < Test::Unit::TestCase
+  def setup()
+    FileUtils.cp(File.join(CURRENT_DIR, "cluster.conf"), CFG_CLUSTER_CONF)
+  end
+
+  def test_basics()
+    assert_equal("cluster.conf", Cfgsync::ClusterConf.name)
+    text = '<cluster config_version="3" name="test1"/>'
+
+    cfg = Cfgsync::ClusterConf.from_text(text)
+    assert_equal(text, cfg.text)
+    assert_equal(3, cfg.version)
+    assert_equal("1c0ff62f0749bea0b877599a02f6557573f286e2", cfg.hash)
+
+    cfg.version = 4
+    assert_equal(4, cfg.version)
+    assert_equal("<cluster config_version='4' name='test1'/>", cfg.text)
+    assert_equal('589e22aaff926907cc1f4db48eeeb5e269e41c39', cfg.hash)
+
+    assert_equal(4, cfg.version)
+    assert_equal("<cluster config_version='4' name='test1'/>", cfg.text)
+    assert_equal('589e22aaff926907cc1f4db48eeeb5e269e41c39', cfg.hash)
+  end
+
+  def test_file()
+    cfg = Cfgsync::ClusterConf.from_file()
+    assert_equal(9, cfg.version)
+    assert_equal("198bda4b748ef646de867cb850cd3ad208c36d8b", cfg.hash)
+  end
+end
+
+
+class TestCorosyncConf < Test::Unit::TestCase
+  def setup()
+    FileUtils.cp(File.join(CURRENT_DIR, 'corosync.conf'), CFG_COROSYNC_CONF)
+  end
+
+  def test_basics()
+    assert_equal('corosync.conf', Cfgsync::CorosyncConf.name)
+    text = '
+totem {
+    version: 2
+    cluster_name: test99
+    config_version: 3
+}
+'
+    cfg = Cfgsync::CorosyncConf.from_text(text)
+    assert_equal(3, cfg.version)
+    assert_equal('570c9f0324f1dec73a632fa9ae4a0dd53ebf8bc7', cfg.hash)
+
+    cfg.version = 4
+    assert_equal(4, cfg.version)
+    assert_equal('efe2fc7d92ddf17ba1f14f334004c7c1933bb1e3', cfg.hash)
+
+    cfg.text = "\
+totem {
+    version: 2
+    cluster_name: test99
+    config_version: 4
+}
+"
+    assert_equal(4, cfg.version)
+    assert_equal('efe2fc7d92ddf17ba1f14f334004c7c1933bb1e3', cfg.hash)
+  end
+
+  def test_file()
+    cfg = Cfgsync::CorosyncConf.from_file()
+    assert_equal(9, cfg.version)
+    assert_equal('cd8faaf2367ceafba281387fb9dfe70eba51769c', cfg.hash)
+  end
+
+  def test_version()
+    text = '
+totem {
+    version: 2
+    cluster_name: test99
+}
+'
+    cfg = Cfgsync::CorosyncConf.from_text(text)
+    assert_equal(0, cfg.version)
+
+    text = '
+totem {
+    version: 2
+    cluster_name: test99
+    config_version: 3
+    config_version: 4
+}
+'
+    cfg = Cfgsync::CorosyncConf.from_text(text)
+    assert_equal(4, cfg.version)
+
+    text = '
+totem {
+    version: 2
+    cluster_name: test99
+    config_version: foo
+}
+'
+    cfg = Cfgsync::CorosyncConf.from_text(text)
+    assert_equal(0, cfg.version)
+
+    text = '
+totem {
+    version: 2
+    cluster_name: test99
+    config_version: 1foo
+}
+'
+    cfg = Cfgsync::CorosyncConf.from_text(text)
+    assert_equal(1, cfg.version)
+  end
+end
+
+
+class TestPcsdSettings < Test::Unit::TestCase
+  def setup()
+    FileUtils.cp(File.join(CURRENT_DIR, "pcs_settings.conf"), CFG_PCSD_SETTINGS)
+  end
+
+  def test_basics()
+    assert_equal("pcs_settings.conf", Cfgsync::PcsdSettings.name)
+    text = '
+{
+  "format_version": 2,
+  "data_version": 3,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1",
+        "rh71-node2"
+      ]
+    }
+  ],
+  "permissions": {
+    "local_cluster": [
+
+    ]
+  }
+}
+    '
+
+    cfg = Cfgsync::PcsdSettings.from_text(text)
+    assert_equal(text, cfg.text)
+    assert_equal(3, cfg.version)
+    assert_equal('b35f951a228ac0734d4c1e45fe73c03b18bca380', cfg.hash)
+
+    cfg.version = 4
+    assert_equal(4, cfg.version)
+    assert_equal('26579b79a27f9f56e1acd398eb761d2eb1872c6d', cfg.hash)
+
+    cfg.text = '{
+  "format_version": 2,
+  "data_version": 4,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1",
+        "rh71-node2"
+      ]
+    }
+  ]
+}'
+    assert_equal(4, cfg.version)
+    assert_equal('efe28c6d63dbce02da1a414ddb68fa1fc4f89c2e', cfg.hash)
+  end
+
+  def test_file()
+    cfg = Cfgsync::PcsdSettings.from_file()
+    assert_equal(9, cfg.version)
+    assert_equal("ac032803c5190d735cd94a702d42c5c6358013b8", cfg.hash)
+  end
+end
+
+
+class TestPcsdTokens < Test::Unit::TestCase
+  def setup()
+    FileUtils.cp(File.join(CURRENT_DIR, 'tokens'), CFG_PCSD_TOKENS)
+  end
+
+  def test_basics()
+    assert_equal('tokens', Cfgsync::PcsdTokens.name)
+    text =
+'{
+  "format_version": 2,
+  "data_version": 3,
+  "tokens": {
+    "rh7-1": "token-rh7-1",
+    "rh7-2": "token-rh7-2"
+  }
+}'
+
+    cfg = Cfgsync::PcsdTokens.from_text(text)
+    assert_equal(text, cfg.text)
+    assert_equal(3, cfg.version)
+    assert_equal('c362c4354ceb0b0425c71ed955d43f89c3d4304a', cfg.hash)
+
+    cfg.version = 4
+    assert_equal(4, cfg.version)
+    assert_equal('9586d6ce66f6fc649618f7f55005d8ddfe54db9b', cfg.hash)
+
+    cfg.text =
+'{
+  "format_version": 2,
+  "data_version": 4,
+  "tokens": {
+    "rh7-1": "token-rh7-1",
+    "rh7-2": "token-rh7-2"
+  }
+}'
+    assert_equal(4, cfg.version)
+    assert_equal('9586d6ce66f6fc649618f7f55005d8ddfe54db9b', cfg.hash)
+  end
+
+  def test_file()
+    cfg = Cfgsync::PcsdTokens.from_file()
+    assert_equal(9, cfg.version)
+    assert_equal('571afb6abc603f527462818e7dfe278a8a1f64a7', cfg.hash)
+  end
+end
+
+
+class TestConfigSyncControl < Test::Unit::TestCase
+  def setup()
+    file = File.open(CFG_SYNC_CONTROL, 'w')
+    file.write(JSON.pretty_generate({}))
+    file.close()
+    @thread_interval_default = 60
+    @thread_interval_minimum = 20
+    @file_backup_count_default = 50
+    @file_backup_count_minimum = 0
+  end
+
+  def test_bad_file()
+    FileUtils.rm(CFG_SYNC_CONTROL, {:force => true})
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+    assert_equal(
+      @thread_interval_default,
+      Cfgsync::ConfigSyncControl.sync_thread_interval()
+    )
+
+    file = File.open(CFG_SYNC_CONTROL, 'w')
+    file.write('')
+    file.close()
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+    assert_equal(
+      @thread_interval_default,
+      Cfgsync::ConfigSyncControl.sync_thread_interval()
+    )
+
+    file = File.open(CFG_SYNC_CONTROL, 'w')
+    file.write(JSON.pretty_generate({
+      'thread_paused_until' => 'abcde',
+      'thread_interval' => 'fghij',
+    }))
+    file.close()
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+    assert_equal(
+      @thread_interval_default,
+      Cfgsync::ConfigSyncControl.sync_thread_interval()
+    )
+  end
+
+  def test_empty_file()
+    # see setup method
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+    assert_equal(
+      @thread_interval_default,
+      Cfgsync::ConfigSyncControl.sync_thread_interval()
+    )
+  end
+
+  def test_paused()
+    semaphore = Mutex.new
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_resume())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_pause(semaphore))
+    assert(Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_resume())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_pause(semaphore, 2))
+    assert(Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+    sleep(4)
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_pause(semaphore, '2'))
+    assert(Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+    sleep(4)
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_pause(semaphore, 'abcd'))
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+  end
+
+  def test_disable()
+    semaphore = Mutex.new
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_enable())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_disable(semaphore))
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_enable())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_paused?())
+    assert(!Cfgsync::ConfigSyncControl.sync_thread_disabled?())
+    assert(Cfgsync::ConfigSyncControl.sync_thread_allowed?())
+  end
+
+  def test_interval()
+    assert_equal(
+      @thread_interval_default,
+      Cfgsync::ConfigSyncControl.sync_thread_interval()
+    )
+
+    interval = @thread_interval_default + @thread_interval_minimum
+    assert(Cfgsync::ConfigSyncControl.sync_thread_interval=(interval))
+    assert_equal(
+      interval,
+      Cfgsync::ConfigSyncControl.sync_thread_interval()
+    )
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_interval=(
+      @thread_interval_minimum / 2
+    ))
+    assert_equal(
+      @thread_interval_minimum,
+      Cfgsync::ConfigSyncControl.sync_thread_interval()
+    )
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_interval=(0))
+    assert_equal(
+      @thread_interval_minimum,
+      Cfgsync::ConfigSyncControl.sync_thread_interval()
+    )
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_interval=(-100))
+    assert_equal(
+      @thread_interval_minimum,
+      Cfgsync::ConfigSyncControl.sync_thread_interval()
+    )
+
+    assert(Cfgsync::ConfigSyncControl.sync_thread_interval=('abcd'))
+    assert_equal(
+      @thread_interval_default,
+      Cfgsync::ConfigSyncControl.sync_thread_interval()
+    )
+  end
+
+  def test_file_backup_count()
+    assert_equal(
+      @file_backup_count_default,
+      Cfgsync::ConfigSyncControl.file_backup_count()
+    )
+
+    count = @file_backup_count_default + @file_backup_count_minimum
+    assert(Cfgsync::ConfigSyncControl.file_backup_count=(count))
+    assert_equal(
+      count,
+      Cfgsync::ConfigSyncControl.file_backup_count()
+    )
+
+    assert(Cfgsync::ConfigSyncControl.file_backup_count=(
+      @file_backup_count_minimum / 2
+    ))
+    assert_equal(
+      @file_backup_count_minimum,
+      Cfgsync::ConfigSyncControl.file_backup_count()
+    )
+
+    assert(Cfgsync::ConfigSyncControl.file_backup_count=(0))
+    assert_equal(
+      @file_backup_count_minimum,
+      Cfgsync::ConfigSyncControl.file_backup_count()
+    )
+
+    assert(Cfgsync::ConfigSyncControl.file_backup_count=(-100))
+    assert_equal(
+      @file_backup_count_minimum,
+      Cfgsync::ConfigSyncControl.file_backup_count()
+    )
+
+    assert(Cfgsync::ConfigSyncControl.file_backup_count=('abcd'))
+    assert_equal(
+      @file_backup_count_default,
+      Cfgsync::ConfigSyncControl.file_backup_count()
+    )
+  end
+end
+
+
+class TestConfigFetcher < Test::Unit::TestCase
+  class ConfigFetcherMock < Cfgsync::ConfigFetcher
+    def get_configs_local()
+      return @configs_local
+    end
+
+    def set_configs_local(configs)
+      @configs_local = configs
+      return self
+    end
+
+    def get_configs_cluster(nodes, cluster_name)
+      return @configs_cluster
+    end
+
+    def set_configs_cluster(configs)
+      @configs_cluster = configs
+      return self
+    end
+
+    def find_newest_config_test(config_list)
+      return self.find_newest_config(config_list)
+    end
+  end
+
+  def test_find_newest_config()
+    cfg1 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="1" name="test1"/>'
+    )
+    cfg2 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="1" name="test1"/>'
+    )
+    cfg3 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="2" name="test1"/>'
+    )
+    cfg4 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="2" name="test2"/>'
+    )
+    assert(cfg1 == cfg2)
+    assert(cfg1 < cfg3)
+    assert(cfg1 < cfg4)
+    assert(cfg3 < cfg4)
+    fetcher = ConfigFetcherMock.new({}, nil, nil, nil)
+
+    # trivial case
+    assert_equal(cfg1, fetcher.find_newest_config_test([cfg1]))
+    # decide by version only
+    assert_equal(cfg3, fetcher.find_newest_config_test([cfg1, cfg2, cfg3]))
+    assert_equal(cfg3, fetcher.find_newest_config_test([cfg1, cfg1, cfg3]))
+    # in case of multiple configs with the same version decide by count
+    assert_equal(cfg3, fetcher.find_newest_config_test([cfg3, cfg3, cfg4]))
+    assert_equal(
+      cfg3, fetcher.find_newest_config_test([cfg1, cfg3, cfg3, cfg4])
+    )
+    # if the count is the same decide by hash
+    assert(cfg3 < cfg4)
+    assert_equal(cfg4, fetcher.find_newest_config_test([cfg3, cfg4]))
+    assert_equal(cfg4, fetcher.find_newest_config_test([cfg1, cfg3, cfg4]))
+    assert_equal(
+      cfg4, fetcher.find_newest_config_test([cfg3, cfg3, cfg4, cfg4])
+    )
+    assert_equal(
+      cfg4, fetcher.find_newest_config_test([cfg1, cfg3, cfg3, cfg4, cfg4])
+    )
+  end
+
+  def test_fetch()
+    cfg1 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="1" name="test1"/>'
+    )
+    cfg2 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="1" name="test1"/>'
+    )
+    cfg3 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="2" name="test1"/>'
+    )
+    cfg4 = Cfgsync::ClusterConf.from_text(
+      '<cluster config_version="2" name="test2"/>'
+    )
+    assert(cfg1 == cfg2)
+    assert(cfg1 < cfg3)
+    assert(cfg1 < cfg4)
+    assert(cfg3 < cfg4)
+    cfg_name = Cfgsync::ClusterConf.name
+    fetcher = ConfigFetcherMock.new({}, [Cfgsync::ClusterConf], nil, nil)
+
+    # local config is synced
+    fetcher.set_configs_local({cfg_name => cfg1})
+
+    fetcher.set_configs_cluster({
+      'node1' => {'configs' => {cfg_name => cfg1}},
+    })
+    assert_equal([[], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {'configs' => {cfg_name => cfg2}},
+    })
+    assert_equal([[], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {'configs' => {cfg_name => cfg1}},
+      'node2' => {'configs' => {cfg_name => cfg2}},
+    })
+    assert_equal([[], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {'configs' => {cfg_name => cfg1}},
+      'node2' => {'configs' => {cfg_name => cfg2}},
+      'node3' => {'configs' => {cfg_name => cfg2}},
+    })
+    assert_equal([[], []], fetcher.fetch())
+
+    # local config is older
+    fetcher.set_configs_local({cfg_name => cfg1})
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg3},
+    })
+    assert_equal([[cfg3], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg3},
+      'node2' => {cfg_name => cfg4},
+    })
+    assert_equal([[cfg4], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg3},
+      'node2' => {cfg_name => cfg4},
+      'node3' => {cfg_name => cfg3},
+    })
+    assert_equal([[cfg3], []], fetcher.fetch())
+
+    # local config is newer
+    fetcher.set_configs_local({cfg_name => cfg3})
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg1},
+    })
+    assert_equal([[], [cfg3]], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg1},
+      'node2' => {cfg_name => cfg1},
+    })
+    assert_equal([[], [cfg3]], fetcher.fetch())
+
+    # local config is the same version
+    fetcher.set_configs_local({cfg_name => cfg3})
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg3},
+    })
+    assert_equal([[], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg4},
+    })
+    assert_equal([[cfg4], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg3},
+      'node2' => {cfg_name => cfg4},
+    })
+    assert_equal([[cfg4], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg3},
+      'node2' => {cfg_name => cfg4},
+      'node3' => {cfg_name => cfg3},
+    })
+    assert_equal([[], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg3},
+      'node2' => {cfg_name => cfg4},
+      'node3' => {cfg_name => cfg4},
+    })
+    assert_equal([[cfg4], []], fetcher.fetch())
+
+    # local config is the same version
+    fetcher.set_configs_local({cfg_name => cfg4})
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg3},
+    })
+    assert_equal([[cfg3], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg4},
+    })
+    assert_equal([[], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg3},
+      'node2' => {cfg_name => cfg4},
+    })
+    assert_equal([[], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg3},
+      'node2' => {cfg_name => cfg4},
+      'node3' => {cfg_name => cfg3},
+    })
+    assert_equal([[cfg3], []], fetcher.fetch())
+
+    fetcher.set_configs_cluster({
+      'node1' => {cfg_name => cfg3},
+      'node2' => {cfg_name => cfg4},
+      'node3' => {cfg_name => cfg4},
+    })
+    assert_equal([[], []], fetcher.fetch())
+  end
+end
+
+
+class TestMergeTokens < Test::Unit::TestCase
+  def setup()
+    FileUtils.cp(File.join(CURRENT_DIR, 'tokens'), CFG_PCSD_TOKENS)
+  end
+
+  def test_nothing_to_merge()
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(old_cfg, nil, {})
+    assert_equal(old_cfg.text.strip, new_cfg.text.strip)
+
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(old_cfg, nil, {'rh7-4' => 'token4'})
+    new_cfg_text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "tokens": {
+    "rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
+    "rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
+    "rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
+    "rh7-4": "token4"
+  }
+}'
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(old_cfg, nil, {'rh7-3' => 'token3'})
+    new_cfg_text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "tokens": {
+    "rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
+    "rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
+    "rh7-3": "token3"
+  }
+}'
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+  end
+
+  def test_only_old_to_merge()
+    to_merge = [
+      Cfgsync::PcsdTokens.from_text(
+'{
+  "format_version": 2,
+  "data_version": 1,
+  "tokens": {
+    "rh7-1": "token1",
+    "rh7-4": "token4a"
+  }
+}'
+      )
+    ]
+
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {})
+    assert_equal(old_cfg.text.strip, new_cfg.text.strip)
+
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {'rh7-4' => 'token4'})
+    new_cfg_text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "tokens": {
+    "rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
+    "rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
+    "rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
+    "rh7-4": "token4"
+  }
+}'
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {'rh7-3' => 'token3'})
+    new_cfg_text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "tokens": {
+    "rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
+    "rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
+    "rh7-3": "token3"
+  }
+}'
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+  end
+
+  def test_one_to_merge()
+    to_merge = [
+      Cfgsync::PcsdTokens.from_text(
+'{
+  "format_version": 2,
+  "data_version": 11,
+  "tokens": {
+    "rh7-1": "token1",
+    "rh7-4": "token4a"
+  }
+}'
+      )
+    ]
+
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {})
+    new_cfg_text =
+'{
+  "format_version": 2,
+  "data_version": 11,
+  "tokens": {
+    "rh7-1": "token1",
+    "rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
+    "rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
+    "rh7-4": "token4a"
+  }
+}'
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {'rh7-4' => 'token4'})
+    new_cfg_text =
+'{
+  "format_version": 2,
+  "data_version": 11,
+  "tokens": {
+    "rh7-1": "token1",
+    "rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
+    "rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
+    "rh7-4": "token4"
+  }
+}'
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(old_cfg, to_merge, {'rh7-3' => 'token3'})
+    new_cfg_text =
+'{
+  "format_version": 2,
+  "data_version": 11,
+  "tokens": {
+    "rh7-1": "token1",
+    "rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
+    "rh7-3": "token3",
+    "rh7-4": "token4a"
+  }
+}'
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+  end
+
+  def test_more_to_merge()
+    to_merge_12 = Cfgsync::PcsdTokens.from_text(
+'{
+  "format_version": 2,
+  "data_version": 12,
+  "tokens": {
+    "rh7-2": "token2",
+    "rh7-4": "token4b"
+  }
+}'
+    )
+    to_merge_11 = Cfgsync::PcsdTokens.from_text(
+'{
+  "format_version": 2,
+  "data_version": 11,
+  "tokens": {
+    "rh7-1": "token1",
+    "rh7-4": "token4a"
+  }
+}'
+    )
+
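+    # The merge result should not depend on the order in which the configs are
+    # merged, so every expected text is checked for both orderings.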
+    new_cfg_text =
+'{
+  "format_version": 2,
+  "data_version": 12,
+  "tokens": {
+    "rh7-1": "token1",
+    "rh7-2": "token2",
+    "rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
+    "rh7-4": "token4b"
+  }
+}'
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(
+      old_cfg, [to_merge_11, to_merge_12], {}
+    )
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(
+      old_cfg, [to_merge_12, to_merge_11], {}
+    )
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+
+    new_cfg_text =
+'{
+  "format_version": 2,
+  "data_version": 12,
+  "tokens": {
+    "rh7-1": "token1",
+    "rh7-2": "token2",
+    "rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71",
+    "rh7-4": "token4"
+  }
+}'
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(
+      old_cfg, [to_merge_11, to_merge_12], {'rh7-4' => 'token4'}
+    )
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(
+      old_cfg, [to_merge_12, to_merge_11], {'rh7-4' => 'token4'}
+    )
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+
+    new_cfg_text =
+'{
+  "format_version": 2,
+  "data_version": 12,
+  "tokens": {
+    "rh7-1": "token1",
+    "rh7-2": "token2",
+    "rh7-3": "token3",
+    "rh7-4": "token4b"
+  }
+}'
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(
+      old_cfg, [to_merge_11, to_merge_12], {'rh7-3' => 'token3'}
+    )
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+    old_cfg = Cfgsync::PcsdTokens.from_file()
+    new_cfg = Cfgsync::merge_tokens_files(
+      old_cfg, [to_merge_12, to_merge_11], {'rh7-3' => 'token3'}
+    )
+    assert_equal(new_cfg_text, new_cfg.text.strip)
+  end
+end
+
diff --git a/pcsd/test/test_cluster.rb b/pcsd/test/test_cluster.rb
new file mode 100644
index 0000000..a1806ca
--- /dev/null
+++ b/pcsd/test/test_cluster.rb
@@ -0,0 +1,51 @@
+require 'test/unit'
+
+require 'pcsd_test_utils.rb'
+require 'cluster.rb'
+
+class TestCluster < Test::Unit::TestCase
+
+  def test_empty
+    cluster = Cluster.new('test', [])
+    assert_equal('test', cluster.name)
+    assert_equal([], cluster.nodes)
+    assert_equal(0, cluster.num_nodes)
+  end
+
+  def test_nodes
+    cluster = Cluster.new('test', ['a', 'b'])
+    assert_equal('test', cluster.name)
+    assert_equal(['a', 'b'], cluster.nodes)
+    assert_equal(2, cluster.num_nodes)
+
+    cluster.nodes = ['x', 'y', 'z']
+    assert_equal('test', cluster.name)
+    assert_equal(['x', 'y', 'z'], cluster.nodes)
+    assert_equal(3, cluster.num_nodes)
+  end
+
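+  # The nodes setter apparently sorts node names and drops duplicates.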
+  def test_nodes_cleanup
+    cluster = Cluster.new('test', ['b', 'a'])
+    assert_equal('test', cluster.name)
+    assert_equal(['a', 'b'], cluster.nodes)
+    assert_equal(2, cluster.num_nodes)
+
+    cluster.nodes = ['z', 'x', 'y', 'z', 'x']
+    assert_equal('test', cluster.name)
+    assert_equal(['x', 'y', 'z'], cluster.nodes)
+    assert_equal(3, cluster.num_nodes)
+  end
+
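+  # Entries that are not plain node names (e.g. nested arrays) appear to be
+  # silently discarded by both the constructor and the setter.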
+  def test_nodes_bad
+    cluster = Cluster.new('test', ['a', ['b', 'c'], 'd'])
+    assert_equal('test', cluster.name)
+    assert_equal(['a', 'd'], cluster.nodes)
+    assert_equal(2, cluster.num_nodes)
+
+    cluster.nodes = ['w', ['x'], 'y', [], 'z']
+    assert_equal('test', cluster.name)
+    assert_equal(['w', 'y', 'z'], cluster.nodes)
+    assert_equal(3, cluster.num_nodes)
+  end
+
+end
diff --git a/pcsd/test/test_cluster_entity.rb b/pcsd/test/test_cluster_entity.rb
new file mode 100644
index 0000000..2b67e19
--- /dev/null
+++ b/pcsd/test/test_cluster_entity.rb
@@ -0,0 +1,3226 @@
+require 'test/unit'
+require 'fileutils'
+require 'json'
+require 'rexml/document'
+require 'set'
+
+require 'pcsd_test_utils.rb'
+require 'cluster_entity.rb'
+
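+# Fixture files, expected to live in CURRENT_DIR alongside the tests:
+# a saved CIB and the matching crm_mon XML output.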
+CIB_FILE = 'cib1.xml'
+CRM_FILE = 'crm1.xml'
+
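+# Asserts that two ClusterEntity::NvSet instances hold the same pairs,
+# comparing id, name and value for every name present in either set.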
+def assert_equal_NvSet(set1, set2)
+  set1.each { |pair|
+    assert(set2.include?(pair.name), "Expected pair with name #{pair.name}")
+    assert(set2[pair.name].id == pair.id, "Id of pair differs. Expected: '#{pair.id}' but was '#{set2[pair.name].id}'")
+    assert(set2[pair.name].name == pair.name, "Name of pair differs. Expected: '#{pair.name}' but was '#{set2[pair.name].name}'")
+    assert(set2[pair.name].value == pair.value, "Value of pair differs. Expected: '#{pair.value}' but was '#{set2[pair.name].value}'")
+  }
+  set2.each { |pair|
+    assert(set1.include?(pair.name), "Found pair which is not expected (name:'#{pair.name}')")
+  }
+end
+
+class TestNvSet < Test::Unit::TestCase
+  def setup
+    @nvSet = ClusterEntity::NvSet.new
+  end
+
+  def test_empty?
+    assert(@nvSet.empty?)
+    @nvSet << ClusterEntity::NvPair.new(nil, nil)
+    assert_equal(false, @nvSet.empty?)
+    @nvSet << ClusterEntity::NvPair.new('id', 'name')
+    assert_equal(false, @nvSet.empty?)
+    @nvSet.delete(nil)
+    assert_equal(false, @nvSet.empty?)
+    @nvSet.delete('name')
+    assert(@nvSet.empty?)
+  end
+
+  def test_length
+    assert_equal(0, @nvSet.length)
+    @nvSet << ClusterEntity::NvPair.new(nil, nil)
+    assert_equal(1, @nvSet.length)
+    @nvSet << ClusterEntity::NvPair.new('id', 'name')
+    assert_equal(2, @nvSet.length)
+    @nvSet.delete('name')
+    assert_equal(1, @nvSet.length)
+    @nvSet.delete(nil)
+    assert_equal(0, @nvSet.length)
+    @nvSet << ClusterEntity::NvPair.new('id', 'name1')
+    @nvSet << ClusterEntity::NvPair.new('id', 'name2')
+    @nvSet << ClusterEntity::NvPair.new('id', 'name3')
+    @nvSet << ClusterEntity::NvPair.new('id', 'name4')
+    assert_equal(4, @nvSet.length)
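+    # Adding a pair whose name already exists appears to replace the stored
+    # pair instead of growing the set.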
+    @nvSet << ClusterEntity::NvPair.new('id', 'name1', 'val')
+    assert_equal(4, @nvSet.length)
+  end
+
+  def test_each
+    counter = 0
+    @nvSet.each { |e|
+      counter += 1
+    }
+    assert_equal(0, counter)
+    pair = ClusterEntity::NvPair.new('id1', 'name1')
+    @nvSet << pair
+    @nvSet.each { |e|
+      assert_equal(pair, e)
+      counter += 1
+    }
+    assert_equal(1, counter)
+    pairs = [pair]
+    pair = ClusterEntity::NvPair.new('id2', 'name2')
+    @nvSet << pair
+    pairs << pair
+    pair = ClusterEntity::NvPair.new('id3', 'name3')
+    @nvSet << pair
+    pairs << pair
+    pairs2 = pairs.dup
+    counter = 0
+    @nvSet.each { |e|
+      assert_equal(pairs.delete(e), e)
+      counter += 1
+    }
+    assert_equal(3, counter)
+    pairs2.delete(@nvSet.delete('name2'))
+    counter = 0
+    @nvSet.each { |e|
+      assert_equal(pairs2.delete(e), e)
+      counter += 1
+    }
+    assert_equal(2, counter)
+  end
+
+  def test_add
+    @nvSet << ClusterEntity::NvPair.new('id1', 'name1', 'value1')
+    counter = 0
+    @nvSet.each { |e|
+      assert_equal('id1', e.id)
+      assert_equal('name1', e.name)
+      assert_equal('value1', e.value)
+      counter += 1
+    }
+    assert_equal(1, counter)
+
+    @nvSet << ClusterEntity::NvPair.new('id2', 'name2', 'value2')
+    map = {
+      'id1' => {
+        :id => 'id1',
+        :name => 'name1',
+        :value => 'value1'
+      },
+      'id2' => {
+        :id => 'id2',
+        :name => 'name2',
+        :value => 'value2'
+      }
+    }
+    counter = 0
+    @nvSet.each { |e|
+      assert_equal(map[e.id][:id], e.id)
+      assert_equal(map[e.id][:name], e.name)
+      assert_equal(map[e.id][:value], e.value)
+      counter += 1
+    }
+    assert_equal(2, counter)
+
+    @nvSet << ClusterEntity::NvPair.new('id22', 'name2', 'value22')
+    map = {
+      'id1' => {
+        :id => 'id1',
+        :name => 'name1',
+        :value => 'value1'
+      },
+      'id22' => {
+        :id => 'id22',
+        :name => 'name2',
+        :value => 'value22'
+      }
+    }
+    counter = 0
+    @nvSet.each { |e|
+      assert_equal(map[e.id][:id], e.id)
+      assert_equal(map[e.id][:name], e.name)
+      assert_equal(map[e.id][:value], e.value)
+      counter += 1
+    }
+    assert_equal(2, counter)
+
+    assert_raise(ArgumentError) {
+      @nvSet << "not NvPair"
+    }
+
+    assert_raise(ArgumentError) {
+      @nvSet << nil
+    }
+  end
+
+  def test_include?
+    assert_equal(false, @nvSet.include?(nil))
+    assert_equal(false, @nvSet.include?('name1'))
+    assert_equal(false, @nvSet.include?('name2'))
+    assert_equal(false, @nvSet.include?('name3'))
+    @nvSet << ClusterEntity::NvPair.new('id1', 'name1', 'value1')
+    assert_equal(false, @nvSet.include?(nil))
+    assert(@nvSet.include?('name1'))
+    assert_equal(false, @nvSet.include?('name2'))
+    assert_equal(false, @nvSet.include?('name3'))
+    @nvSet << ClusterEntity::NvPair.new('id2', 'name2', 'value2')
+    assert_equal(false, @nvSet.include?(nil))
+    assert(@nvSet.include?('name1'))
+    assert(@nvSet.include?('name2'))
+    assert_equal(false, @nvSet.include?('name3'))
+    @nvSet << ClusterEntity::NvPair.new('id22', 'name2', 'value22')
+    assert_equal(false, @nvSet.include?(nil))
+    assert(@nvSet.include?('name1'))
+    assert(@nvSet.include?('name2'))
+    assert_equal(false, @nvSet.include?('name3'))
+  end
+
+  def test_indexer
+    assert_nil(@nvSet[nil])
+    assert_nil(@nvSet['name1'])
+    @nvSet << ClusterEntity::NvPair.new('id1', 'name1', 'value1')
+    assert_nil(@nvSet[nil])
+    assert_nil(@nvSet['name2'])
+    assert_equal('id1', @nvSet['name1'].id)
+    assert_equal('name1', @nvSet['name1'].name)
+    assert_equal('value1', @nvSet['name1'].value)
+    @nvSet << ClusterEntity::NvPair.new('id2', 'name2', 'value2')
+    assert_equal('id1', @nvSet['name1'].id)
+    assert_equal('name1', @nvSet['name1'].name)
+    assert_equal('value1', @nvSet['name1'].value)
+    assert_equal('id2', @nvSet['name2'].id)
+    assert_equal('name2', @nvSet['name2'].name)
+    assert_equal('value2', @nvSet['name2'].value)
+    assert_nil(@nvSet[nil])
+    assert_nil(@nvSet['name3'])
+  end
+
+  def test_delete
+    assert_nil(@nvSet.delete(nil))
+    assert_nil(@nvSet.delete('name1'))
+    @nvSet << ClusterEntity::NvPair.new('id1', 'name1', 'value1')
+    @nvSet << ClusterEntity::NvPair.new('id2', 'name2', 'value2')
+    @nvSet << ClusterEntity::NvPair.new('id3', 'name3', 'value3')
+    p = @nvSet.delete('name2')
+    assert_equal(false, @nvSet.include?('name2'))
+    assert_equal('id2', p.id)
+    assert_equal('name2', p.name)
+    assert_equal('value2', p.value)
+    assert(@nvSet.include?('name1'))
+    assert(@nvSet.include?('name3'))
+    @nvSet.delete('name3')
+    assert_equal(false, @nvSet.include?('name3'))
+    assert_nil(@nvSet.delete('name2'))
+  end
+end
+
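+# CRMResourceStatus is built from a single <resource/> element taken from
+# crm_mon XML output.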
+class TestResourceStatus < Test::Unit::TestCase
+  def setup
+    @cib = REXML::Document.new(File.read(File.join(CURRENT_DIR, CIB_FILE)))
+    @crm_mon = REXML::Document.new(File.read(File.join(CURRENT_DIR, CRM_FILE)))
+  end
+
+  def test_init
+    s = ClusterEntity::CRMResourceStatus.new(@crm_mon.elements["//resource[@id='dummy1']"])
+    assert_equal('dummy1', s.id)
+    assert_equal('ocf::heartbeat:Dummy', s.resource_agent)
+    assert(s.managed)
+    assert_equal(false, s.failed)
+    assert_equal('Started', s.role)
+    assert(s.active)
+    assert_equal(false, s.orphaned)
+    assert_equal(false, s.failure_ignored)
+    assert_equal(1, s.nodes_running_on)
+    assert_nil(s.pending)
+    node = {
+      :name => 'node1',
+      :id => '1',
+      :cached => false
+    }
+    assert(node == s.node)
+  end
+
+  def test_init_invalid_element
+    xml = "<primitive id='dummy1'/>"
+    s = ClusterEntity::CRMResourceStatus.new(REXML::Document.new(xml))
+    assert_nil(s.id)
+    assert_nil(s.resource_agent)
+    assert_equal(false, s.managed)
+    assert_equal(false, s.failed)
+    assert_nil(s.role)
+    assert_equal(false, s.active)
+    assert_equal(false, s.orphaned)
+    assert_equal(false, s.failure_ignored)
+    assert_equal(0, s.nodes_running_on)
+    assert_nil(s.pending)
+    assert_nil(s.node)
+  end
+end
+
+class TestResourceOperation < Test::Unit::TestCase
+  def setup
+    @cib = REXML::Document.new(File.read(File.join(CURRENT_DIR, CIB_FILE)))
+    @crm_mon = REXML::Document.new(File.read(File.join(CURRENT_DIR, CRM_FILE)))
+  end
+
+  def test_init
+    o = ClusterEntity::ResourceOperation.new(@cib.elements["//lrm_rsc_op[@id='dummy1_last_0']"])
+    assert_equal(123, o.call_id)
+    assert_equal('build_active_RAs', o.crm_debug_origin)
+    assert_equal('3.0.9', o.crm_feature_set)
+    assert_equal(21, o.exec_time)
+    assert_nil(o.exit_reason)
+    assert_equal('dummy1_last_0', o.id)
+    assert_equal(0, o.interval)
+    assert_equal(1436002943, o.last_rc_change)
+    assert_equal(1436002943, o.last_run)
+    assert_equal('node3', o.on_node)
+    assert_equal('07c70cdfaab292cf9afd6ca7c583b7ff', o.op_digest)
+    assert_equal('dummy1_stop_0', o.operation_key)
+    assert_equal('stop', o.operation)
+    assert_nil(o.op_force_restart)
+    assert_nil(o.op_restart_digest)
+    assert_equal(0, o.op_status)
+    assert_equal(0, o.queue_time)
+    assert_equal(0, o.rc_code)
+    assert_equal('36:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6', o.transition_key)
+    assert_equal('0:0;36:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6', o.transition_magic)
+  end
+end
+
+class TestPrimitive < Test::Unit::TestCase
+  def setup
+    @cib = REXML::Document.new(File.read(File.join(CURRENT_DIR, CIB_FILE)))
+    @crm_mon = REXML::Document.new(File.read(File.join(CURRENT_DIR, CRM_FILE)))
+  end
+
+  def test_init
+    obj = ClusterEntity::Primitive.new(@cib.elements["//primitive[@id='dummy1']"])
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-meta_attributes-testattr',
+      'testattr',
+      '0'
+    ) << ClusterEntity::NvPair.new(
+      'dummy1-meta_attributes-attr2',
+      'attr2',
+      '10'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    utilization = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-utilization-test_name',
+      'test_name',
+      '-10'
+    ) << ClusterEntity::NvPair.new(
+      'dummy1-utilization-another_one',
+      'another_one',
+      '0'
+    )
+    assert_equal_NvSet(utilization, obj.utilization)
+    assert_equal('dummy1', obj.id)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_equal('ocf::heartbeat:Dummy', obj.agentname)
+    assert_equal('ocf', obj._class)
+    assert_equal('heartbeat', obj.provider)
+    assert_equal('Dummy', obj.type)
+    assert_equal(false, obj.stonith)
+    instance_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-instance_attributes-fake',
+      'fake',
+      '--test'
+    )
+    assert_equal_NvSet(instance_attr, obj.instance_attr)
+    assert(obj.crm_status.empty?)
+    assert(obj.operations.empty?)
+  end
+
+  def test_init_with_crm
+    obj = ClusterEntity::Primitive.new(
+      @cib.elements["//primitive[@id='dummy1']"],
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-meta_attributes-testattr',
+      'testattr',
+      '0'
+    ) << ClusterEntity::NvPair.new(
+      'dummy1-meta_attributes-attr2',
+      'attr2',
+      '10'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    utilization = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-utilization-test_name',
+      'test_name',
+      '-10'
+    ) << ClusterEntity::NvPair.new(
+      'dummy1-utilization-another_one',
+      'another_one',
+      '0'
+    )
+    assert_equal_NvSet(utilization, obj.utilization)
+    assert_equal('dummy1', obj.id)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_equal('ocf::heartbeat:Dummy', obj.agentname)
+    assert_equal('ocf', obj._class)
+    assert_equal('heartbeat', obj.provider)
+    assert_equal('Dummy', obj.type)
+    assert_equal(false, obj.stonith)
+    instance_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-instance_attributes-fake',
+      'fake',
+      '--test'
+    )
+    assert_equal_NvSet(instance_attr, obj.instance_attr)
+    assert(obj.operations.empty?)
+    assert_equal(1, obj.crm_status.length)
+
+    # ResourceStatus
+    s = obj.crm_status[0]
+    assert_instance_of(ClusterEntity::CRMResourceStatus, s)
+    assert_equal('dummy1', s.id)
+    assert_equal('ocf::heartbeat:Dummy', s.resource_agent)
+    assert(s.managed)
+    assert_equal(false, s.failed)
+    assert_equal('Started', s.role)
+    assert(s.active)
+    assert_equal(false, s.orphaned)
+    assert_equal(false, s.failure_ignored)
+    assert_equal(1, s.nodes_running_on)
+    assert_nil(s.pending)
+    node = {
+      :name => 'node1',
+      :id => '1',
+      :cached => false
+    }
+    assert(node == s.node)
+  end
+
+  def test_init_with_crm_and_operations
+    obj = ClusterEntity::Primitive.new(
+      @cib.elements["//primitive[@id='dummy1']"],
+      ClusterEntity::get_rsc_status(@crm_mon),
+      nil,
+      ClusterEntity::get_resources_operations(@cib)
+    )
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-meta_attributes-testattr',
+      'testattr',
+      '0'
+    ) << ClusterEntity::NvPair.new(
+      'dummy1-meta_attributes-attr2',
+      'attr2',
+      '10'
+    )
+    utilization = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-utilization-test_name',
+      'test_name',
+      '-10'
+    ) << ClusterEntity::NvPair.new(
+      'dummy1-utilization-another_one',
+      'another_one',
+      '0'
+    )
+    assert_equal_NvSet(utilization, obj.utilization)
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert_equal('dummy1', obj.id)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_equal('ocf::heartbeat:Dummy', obj.agentname)
+    assert_equal('ocf', obj._class)
+    assert_equal('heartbeat', obj.provider)
+    assert_equal('Dummy', obj.type)
+    assert_equal(false, obj.stonith)
+    instance_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-instance_attributes-fake',
+      'fake',
+      '--test'
+    )
+    assert_equal_NvSet(instance_attr, obj.instance_attr)
+    assert_equal(1, obj.crm_status.length)
+    assert_equal(4, obj.operations.length)
+
+    # ResourceStatus
+    s = obj.crm_status[0]
+    assert_instance_of(ClusterEntity::CRMResourceStatus, s)
+    assert_equal('dummy1', s.id)
+    assert_equal('ocf::heartbeat:Dummy', s.resource_agent)
+    assert(s.managed)
+    assert_equal(false, s.failed)
+    assert_equal('Started', s.role)
+    assert(s.active)
+    assert_equal(false, s.orphaned)
+    assert_equal(false, s.failure_ignored)
+    assert_equal(1, s.nodes_running_on)
+    assert_nil(s.pending)
+    node = {
+      :name => 'node1',
+      :id => '1',
+      :cached => false
+    }
+    assert(node == s.node)
+
+    # ResourceOperation
+    assert_equal('running', obj.status.to_s)
+    o = obj.operations[0]
+    assert_instance_of(ClusterEntity::ResourceOperation, o)
+    assert_equal(123, o.call_id)
+    assert_equal('build_active_RAs', o.crm_debug_origin)
+    assert_equal('3.0.9', o.crm_feature_set)
+    assert_equal(21, o.exec_time)
+    assert_nil(o.exit_reason)
+    assert_equal('dummy1_last_0', o.id)
+    assert_equal(0, o.interval)
+    assert_equal(1436002943, o.last_rc_change)
+    assert_equal(1436002943, o.last_run)
+    assert_equal('node3', o.on_node)
+    assert_equal('07c70cdfaab292cf9afd6ca7c583b7ff', o.op_digest)
+    assert_equal('dummy1_stop_0', o.operation_key)
+    assert_equal('stop', o.operation)
+    assert_nil(o.op_force_restart)
+    assert_nil(o.op_restart_digest)
+    assert_equal(0, o.op_status)
+    assert_equal(0, o.queue_time)
+    assert_equal(0, o.rc_code)
+    assert_equal('36:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6', o.transition_key)
+    assert_equal('0:0;36:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6', o.transition_magic)
+
+    o = obj.operations[1]
+    assert_instance_of(ClusterEntity::ResourceOperation, o)
+    assert_equal('dummy1_last_0', o.id)
+    assert_equal('16d989b809c6743cad46d0d12b8a9262', o.op_digest)
+
+    o = obj.operations[2]
+    assert_instance_of(ClusterEntity::ResourceOperation, o)
+    assert_equal('dummy1_monitor_10000', o.id)
+    assert_equal('c94db5a1993f190ecfd975fd8fe499b3', o.op_digest)
+
+    o = obj.operations[3]
+    assert_instance_of(ClusterEntity::ResourceOperation, o)
+    assert_equal('dummy1_last_0', o.id)
+    assert_equal('07c70cdfaab292cf9afd6ca7c583b7ff', o.op_digest)
+  end
+
+  def test_init_with_operations
+    obj = ClusterEntity::Primitive.new(
+      @cib.elements["//primitive[@id='dummy1']"],
+      nil,
+      nil,
+      ClusterEntity::get_resources_operations(@cib)
+    )
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-meta_attributes-testattr',
+      'testattr',
+      '0'
+    ) << ClusterEntity::NvPair.new(
+      'dummy1-meta_attributes-attr2',
+      'attr2',
+      '10'
+    )
+    utilization = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-utilization-test_name',
+      'test_name',
+      '-10'
+    ) << ClusterEntity::NvPair.new(
+      'dummy1-utilization-another_one',
+      'another_one',
+      '0'
+    )
+    assert_equal_NvSet(utilization, obj.utilization)
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert_equal('dummy1', obj.id)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_equal('ocf::heartbeat:Dummy', obj.agentname)
+    assert_equal('ocf', obj._class)
+    assert_equal('heartbeat', obj.provider)
+    assert_equal('Dummy', obj.type)
+    assert_equal(false, obj.stonith)
+    instance_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy1-instance_attributes-fake',
+      'fake',
+      '--test'
+    )
+    assert_equal_NvSet(instance_attr, obj.instance_attr)
+    assert(obj.crm_status.empty?)
+    assert_equal(4, obj.operations.length)
+
+    # ResourceOperation
+    o = obj.operations[0]
+    assert_instance_of(ClusterEntity::ResourceOperation, o)
+    assert_equal('dummy1_last_0', o.id)
+    assert_equal(123, o.call_id)
+    assert_equal('build_active_RAs', o.crm_debug_origin)
+    assert_equal('3.0.9', o.crm_feature_set)
+    assert_equal(21, o.exec_time)
+    assert_nil(o.exit_reason)
+    assert_equal('dummy1_last_0', o.id)
+    assert_equal(0, o.interval)
+    assert_equal(1436002943, o.last_rc_change)
+    assert_equal(1436002943, o.last_run)
+    assert_equal('node3', o.on_node)
+    assert_equal('07c70cdfaab292cf9afd6ca7c583b7ff', o.op_digest)
+    assert_equal('dummy1_stop_0', o.operation_key)
+    assert_equal('stop', o.operation)
+    assert_nil(o.op_force_restart)
+    assert_nil(o.op_restart_digest)
+    assert_equal(0, o.op_status)
+    assert_equal(0, o.queue_time)
+    assert_equal(0, o.rc_code)
+    assert_equal('36:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6', o.transition_key)
+    assert_equal('0:0;36:2:0:c4cdc0be-a153-421e-b1f9-d78eee41c0b6', o.transition_magic)
+
+    o = obj.operations[1]
+    assert_instance_of(ClusterEntity::ResourceOperation, o)
+    assert_equal('dummy1_last_0', o.id)
+    assert_equal('16d989b809c6743cad46d0d12b8a9262', o.op_digest)
+
+    o = obj.operations[2]
+    assert_instance_of(ClusterEntity::ResourceOperation, o)
+    assert_equal('dummy1_monitor_10000', o.id)
+    assert_equal('c94db5a1993f190ecfd975fd8fe499b3', o.op_digest)
+
+    o = obj.operations[3]
+    assert_instance_of(ClusterEntity::ResourceOperation, o)
+    assert_equal('dummy1_last_0', o.id)
+    assert_equal('07c70cdfaab292cf9afd6ca7c583b7ff', o.op_digest)
+  end
+
+  def test_init_nil
+    obj = ClusterEntity::Primitive.new(nil, nil, 'parent', nil)
+    assert_equal('parent', obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.meta_attr.empty?)
+    assert_nil(obj.id)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_nil(obj.agentname)
+    assert_nil(obj._class)
+    assert_nil(obj.provider)
+    assert_nil(obj.type)
+    assert_equal(false, obj.stonith)
+    assert(obj.instance_attr.empty?)
+    assert(obj.crm_status.empty?)
+    assert(obj.operations.empty?)
+  end
+
+  def test_init_invalid_element
+    xml = '<empty_document/>'
+    obj = ClusterEntity::Primitive.new(REXML::Document.new(xml))
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.meta_attr.empty?)
+    assert_nil(obj.id)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_nil(obj.agentname)
+    assert_nil(obj._class)
+    assert_nil(obj.provider)
+    assert_nil(obj.type)
+    assert_equal(false, obj.stonith)
+    assert(obj.instance_attr.empty?)
+    assert(obj.crm_status.empty?)
+    assert(obj.operations.empty?)
+  end
+
+  def test_init_empty_element
+    xml = '<primitive/>'
+    obj = ClusterEntity::Primitive.new(REXML::Document.new(xml).elements['primitive'])
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.meta_attr.empty?)
+    assert_nil(obj.id)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_nil(obj.agentname)
+    assert_nil(obj._class)
+    assert_nil(obj.provider)
+    assert_nil(obj.type)
+    assert_equal(false, obj.stonith)
+    assert(obj.instance_attr.empty?)
+    assert(obj.crm_status.empty?)
+    assert(obj.operations.empty?)
+  end
+
+  def test_init_empty_element_with_crm
+    xml = '<primitive/>'
+    obj = ClusterEntity::Primitive.new(
+      REXML::Document.new(xml).elements['primitive'],
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.meta_attr.empty?)
+    assert_nil(obj.id)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_nil(obj.agentname)
+    assert_nil(obj._class)
+    assert_nil(obj.provider)
+    assert_nil(obj.type)
+    assert_equal(false, obj.stonith)
+    assert(obj.instance_attr.empty?)
+    assert(obj.crm_status.empty?)
+    assert(obj.operations.empty?)
+  end
+
+  def test_init_stonith
+    obj = ClusterEntity::Primitive.new(@cib.elements["//primitive[@id='node1-stonith']"])
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.meta_attr.empty?)
+    assert_equal('node1-stonith', obj.id)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_equal('stonith:fence_xvm', obj.agentname)
+    assert_equal('stonith', obj._class)
+    assert_nil(obj.provider)
+    assert_equal('fence_xvm', obj.type)
+    assert(obj.stonith)
+    instance_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'node1-stonith-instance_attributes-domain',
+      'domain',
+      'node1'
+    )
+    assert_equal_NvSet(instance_attr, obj.instance_attr)
+    assert(obj.crm_status.empty?)
+    assert(obj.operations.empty?)
+  end
+
+  def test_init_stonith_with_crm
+    obj = ClusterEntity::Primitive.new(
+      @cib.elements["//primitive[@id='node1-stonith']"],
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.meta_attr.empty?)
+    assert_equal('node1-stonith', obj.id)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_equal('stonith:fence_xvm', obj.agentname)
+    assert_equal('stonith', obj._class)
+    assert_nil(obj.provider)
+    assert_equal('fence_xvm', obj.type)
+    assert(obj.stonith)
+    instance_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'node1-stonith-instance_attributes-domain',
+      'domain',
+      'node1'
+    )
+    assert_equal_NvSet(instance_attr, obj.instance_attr)
+    assert(obj.operations.empty?)
+
+    # ResourceStatus
+    s = obj.crm_status[0]
+    assert_equal('node1-stonith', s.id)
+    assert_equal('stonith:fence_xvm', s.resource_agent)
+    assert(s.managed)
+    assert_equal(false, s.failed)
+    assert_equal('Started', s.role)
+    assert(s.active)
+    assert_equal(false, s.orphaned)
+    assert_equal(false, s.failure_ignored)
+    assert_equal(1, s.nodes_running_on)
+    assert_nil(s.pending)
+    node = {
+      :name => 'node3',
+      :id => '3',
+      :cached => false
+    }
+    assert(node == s.node)
+  end
+
+  def test_to_status_version1
+    obj = ClusterEntity::Primitive.new(
+      @cib.elements["//primitive[@id='dummy1']"],
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json = '[{
+      "id": "dummy1",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node1"
+      ],
+      "group": null,
+      "clone": false,
+      "clone_id": null,
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {
+        "fake": "--test"
+      },
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {
+        "fake": "--test"
+      },
+      "meta_attr": [
+        {
+          "key": "testattr",
+          "value": "0",
+          "id": "dummy1-meta_attributes-testattr",
+          "parent": "dummy1"
+        },
+        {
+          "key": "attr2",
+          "value": "10",
+          "id": "dummy1-meta_attributes-attr2",
+          "parent": "dummy1"
+        }
+      ]
+    }]'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    assert(hash == obj.to_status)
+  end
+
+  def test_to_status_version1_no_crm
+    obj = ClusterEntity::Primitive.new(@cib.elements["//primitive[@id='dummy1']"])
+    json = '[{
+      "id": "dummy1",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": false,
+      "nodes": [
+      ],
+      "group": null,
+      "clone": false,
+      "clone_id": null,
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {
+        "fake": "--test"
+      },
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {
+        "fake": "--test"
+      },
+      "meta_attr": [
+        {
+          "key": "testattr",
+          "value": "0",
+          "id": "dummy1-meta_attributes-testattr",
+          "parent": "dummy1"
+        },
+        {
+          "key": "attr2",
+          "value": "10",
+          "id": "dummy1-meta_attributes-attr2",
+          "parent": "dummy1"
+        }
+      ]
+    }]'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    assert(hash == obj.to_status)
+  end
+
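+  # Status format version 2 is the richer nested representation (class_type,
+  # utilization, crm_status, operations), as opposed to the flat version 1
+  # list produced above.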
+  def test_to_status_version2
+    obj = ClusterEntity::Primitive.new(
+      @cib.elements["//primitive[@id='dummy1']"],
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json = '{
+      "id": "dummy1",
+      "meta_attr": [
+        {
+          "id": "dummy1-meta_attributes-testattr",
+          "name": "testattr",
+          "value": "0"
+        },
+        {
+          "id": "dummy1-meta_attributes-attr2",
+          "name": "attr2",
+          "value": "10"
+        }
+      ],
+      "utilization": [
+        {
+          "id": "dummy1-utilization-test_name",
+          "name": "test_name",
+          "value": "-10"
+        },
+        {
+          "id": "dummy1-utilization-another_one",
+          "name": "another_one",
+          "value": "0"
+        }
+      ],
+      "error_list": [],
+      "warning_list": [],
+      "class_type": "primitive",
+      "parent_id": null,
+      "disabled": false,
+      "agentname": "ocf::heartbeat:Dummy",
+      "provider": "heartbeat",
+      "type": "Dummy",
+      "stonith": false,
+      "instance_attr": [
+        {
+          "id": "dummy1-instance_attributes-fake",
+          "name": "fake",
+          "value": "--test"
+        }
+      ],
+      "status": "running",
+      "class": "ocf",
+      "crm_status": [
+        {
+          "id": "dummy1",
+          "resource_agent": "ocf::heartbeat:Dummy",
+          "managed": true,
+          "failed": false,
+          "role": "Started",
+          "active": true,
+          "orphaned": false,
+          "failure_ignored": false,
+          "nodes_running_on": 1,
+          "pending": null,
+          "node": {
+            "name": "node1",
+            "id": "1",
+            "cached": false
+          }
+        }
+      ],
+      "operations": []
+    }'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    # assert_equal_hashes(hash, obj.to_status('2'))
+    assert(hash == obj.to_status('2'))
+  end
+end
+
+class TestGroup < Test::Unit::TestCase
+  def setup
+    @cib = REXML::Document.new(File.read(File.join(CURRENT_DIR, CIB_FILE)))
+    @crm_mon = REXML::Document.new(File.read(File.join(CURRENT_DIR, CRM_FILE)))
+  end
+
+  def test_init
+    obj = ClusterEntity::Group.new(@cib.elements["//group[@id='group1']"])
+    assert_equal('group1', obj.id)
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'group1-meta_attributes-c',
+      'c',
+      '1'
+    ) << ClusterEntity::NvPair.new(
+      'group1-meta_attributes-aaa',
+      'aaa',
+      '333'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_equal(2, obj.members.length)
+    m = obj.members
+    assert_instance_of(ClusterEntity::Primitive, m[0])
+    assert_nil(m[0].get_master)
+    assert_nil(m[0].get_clone)
+    assert_equal(obj.id, m[0].get_group)
+    assert_equal('dummy3', m[0].id)
+    assert_equal(obj, m[0].parent)
+    assert(m[0].crm_status.empty?)
+    assert_instance_of(ClusterEntity::Primitive, m[1])
+    assert_nil(m[1].get_master)
+    assert_nil(m[1].get_clone)
+    assert_equal(obj.id, m[1].get_group)
+    assert_equal('dummy4', m[1].id)
+    assert_equal(obj, m[1].parent)
+    assert(m[1].crm_status.empty?)
+  end
+
+  def test_init_with_crm
+    obj = ClusterEntity::Group.new(
+      @cib.elements["//group[@id='group1']"],
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    assert_equal('group1', obj.id)
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'group1-meta_attributes-c',
+      'c',
+      '1'
+    ) << ClusterEntity::NvPair.new(
+      'group1-meta_attributes-aaa',
+      'aaa',
+      '333'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_equal(2, obj.members.length)
+
+    m = obj.members
+    assert_instance_of(ClusterEntity::Primitive, m[0])
+    assert_instance_of(ClusterEntity::Primitive, m[1])
+    assert_equal('dummy3', m[0].id)
+    assert_equal(obj, m[0].parent)
+    assert_equal('dummy4', m[1].id)
+    assert_equal(obj, m[1].parent)
+    assert_equal(1, m[0].crm_status.length)
+    assert_equal(1, m[1].crm_status.length)
+    assert_nil(m[0].get_master)
+    assert_nil(m[0].get_clone)
+    assert_equal(obj.id, m[0].get_group)
+    assert_nil(m[1].get_master)
+    assert_nil(m[1].get_clone)
+    assert_equal(obj.id, m[1].get_group)
+  end
+
+  def test_init_invalid_element
+    xml = '<primitive id="dummy1"/>'
+    obj = ClusterEntity::Group.new(REXML::Document.new(xml).elements['*'])
+    assert_nil(obj.id)
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.meta_attr.empty?)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert(obj.members.empty?)
+  end
+
+  def test_init_empty_element
+    xml = '<group id="group"/>'
+    obj = ClusterEntity::Group.new(REXML::Document.new(xml).elements['*'])
+    assert_equal('group', obj.id)
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.meta_attr.empty?)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert(obj.members.empty?)
+  end
+
+  def test_to_status_version1
+    obj = ClusterEntity::Group.new(
+      @cib.elements["//group[@id='group1']"],
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json = '[{
+      "id": "dummy3",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node3"
+      ],
+      "group": "group1",
+      "clone": false,
+      "clone_id": null,
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "c",
+          "value": "1",
+          "id": "group1-meta_attributes-c",
+          "parent": "group1"
+        },
+        {
+          "key": "aaa",
+          "value": "333",
+          "id": "group1-meta_attributes-aaa",
+          "parent": "group1"
+        },
+        {
+          "key": "b",
+          "value": "3",
+          "id": "dummy3-meta_attributes-b",
+          "parent": "dummy3"
+        }
+      ]
+    },
+    {
+      "id": "dummy4",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node3"
+      ],
+      "group": "group1",
+      "clone": false,
+      "clone_id": null,
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "c",
+          "value": "1",
+          "id": "group1-meta_attributes-c",
+          "parent": "group1"
+        },
+        {
+          "key": "aaa",
+          "value": "333",
+          "id": "group1-meta_attributes-aaa",
+          "parent": "group1"
+        },
+        {
+          "key": "b",
+          "value": "4",
+          "id": "dummy4-meta_attributes-b",
+          "parent": "dummy4"
+        }
+      ]
+    }]'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    assert(hash == obj.to_status)
+  end
+
+  def test_to_status_version2
+    obj = ClusterEntity::Group.new(
+      @cib.elements["//group[@id='group1']"],
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json = '{
+      "id": "group1",
+      "meta_attr": [
+        {
+          "id": "group1-meta_attributes-c",
+          "name": "c",
+          "value": "1"
+        },
+        {
+          "id": "group1-meta_attributes-aaa",
+          "name": "aaa",
+          "value": "333"
+        }
+      ],
+      "error_list": [],
+      "warning_list": [],
+      "class_type": "group",
+      "parent_id": null,
+      "disabled": false,
+      "status": "running",
+      "members": [
+        {
+          "id": "dummy3",
+          "meta_attr": [
+            {
+              "id": "dummy3-meta_attributes-aaa",
+              "name": "aaa",
+              "value": "111"
+            },
+            {
+              "id": "dummy3-meta_attributes-b",
+              "name": "b",
+              "value": "3"
+            }
+          ],
+          "utilization": [],
+          "error_list": [],
+          "warning_list": [],
+          "class_type": "primitive",
+          "parent_id": "group1",
+          "disabled": false,
+          "agentname": "ocf::heartbeat:Dummy",
+          "provider": "heartbeat",
+          "type": "Dummy",
+          "stonith": false,
+          "instance_attr": [],
+          "status": "running",
+          "class": "ocf",
+          "crm_status": [
+            {
+              "id": "dummy3",
+              "resource_agent": "ocf::heartbeat:Dummy",
+              "managed": true,
+              "failed": false,
+              "role": "Started",
+              "active": true,
+              "orphaned": false,
+              "failure_ignored": false,
+              "nodes_running_on": 1,
+              "pending": null,
+              "node": {
+                "name": "node3",
+                "id": "3",
+                "cached": false
+              }
+            }
+          ],
+          "operations": []
+        },
+        {
+          "id": "dummy4",
+          "meta_attr": [
+            {
+              "id": "dummy4-meta_attributes-aaa",
+              "name": "aaa",
+              "value": "222"
+            },
+            {
+              "id": "dummy4-meta_attributes-b",
+              "name": "b",
+              "value": "4"
+            }
+          ],
+          "utilization": [],
+          "error_list": [],
+          "warning_list": [],
+          "class_type": "primitive",
+          "parent_id": "group1",
+          "disabled": false,
+          "agentname": "ocf::heartbeat:Dummy",
+          "provider": "heartbeat",
+          "type": "Dummy",
+          "stonith": false,
+          "instance_attr": [],
+          "status": "running",
+          "class": "ocf",
+          "crm_status": [
+            {
+              "id": "dummy4",
+              "resource_agent": "ocf::heartbeat:Dummy",
+              "managed": true,
+              "failed": false,
+              "role": "Started",
+              "active": true,
+              "orphaned": false,
+              "failure_ignored": false,
+              "nodes_running_on": 1,
+              "pending": null,
+              "node": {
+                "name": "node3",
+                "id": "3",
+                "cached": false
+              }
+            }
+          ],
+          "operations": []
+        }
+      ]
+    }'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    assert(hash == obj.to_status('2'))
+  end
+end
+
+class TestClone < Test::Unit::TestCase
+  def setup
+    @cib = REXML::Document.new(File.read(File.join(CURRENT_DIR, CIB_FILE)))
+    @crm_mon = REXML::Document.new(File.read(File.join(CURRENT_DIR, CRM_FILE)))
+  end
+
+  def test_init_invalid_element
+    xml = '<primitive id="dummy"/>'
+    obj = ClusterEntity::Clone.new(REXML::Document.new(xml).elements['*'])
+    assert_nil(obj.id)
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.meta_attr.empty?)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert_equal(false, obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+  end
+
+  def test_init_empty_element
+    xml = '<clone id="dummy-clone"/>'
+    obj = ClusterEntity::Clone.new(REXML::Document.new(xml).elements['*'])
+    assert_equal('dummy-clone', obj.id)
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.meta_attr.empty?)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert_equal(false, obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+  end
+
+  def test_init_primitive_with_crm
+    obj = ClusterEntity::Clone.new(
+      @cib.elements["//clone[@id='dummy-clone']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    assert_equal('dummy-clone', obj.id)
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy-clone-meta_attributes-ccc',
+      'ccc',
+      '222'
+    ) << ClusterEntity::NvPair.new(
+      'dummy-clone-meta_attributes-aaa',
+      'aaa',
+      '222'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_not_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert(obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+
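+    # The clone member carries one crm_status entry per clone instance; the
+    # fixture appears to run the clone on three nodes.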
+    m = obj.member
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal(obj, m.parent)
+    assert_equal('dummy', m.id)
+    assert_equal(3, m.crm_status.length)
+    assert_nil(m.get_master)
+    assert_equal(obj.id, m.get_clone)
+    assert_nil(m.get_group)
+  end
+
+  def test_init_primitive
+    obj = ClusterEntity::Clone.new(@cib.elements["//clone[@id='dummy-clone']"])
+    assert_equal('dummy-clone', obj.id)
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'dummy-clone-meta_attributes-ccc',
+      'ccc',
+      '222'
+    ) << ClusterEntity::NvPair.new(
+      'dummy-clone-meta_attributes-aaa',
+      'aaa',
+      '222'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_not_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert_equal(false, obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+
+    m = obj.member
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal(obj, m.parent)
+    assert_equal('dummy', m.id)
+    assert(m.crm_status.empty?)
+    assert_nil(m.get_master)
+    assert_equal(obj.id, m.get_clone)
+    assert_nil(m.get_group)
+  end
+
+  def test_init_group_with_crm
+    obj = ClusterEntity::Clone.new(
+      @cib.elements["//clone[@id='group2-clone']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'group2-clone-meta_attributes-a',
+      'a',
+      '1'
+    ) << ClusterEntity::NvPair.new(
+      'group2-clone-meta_attributes-d',
+      'd',
+      '1'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_not_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert(obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+
+    g = obj.member
+    assert_instance_of(ClusterEntity::Group, g)
+    assert_equal(obj, g.parent)
+    assert_equal('group2', g.id)
+    assert_equal(2, g.members.length)
+    assert_nil(g.get_master)
+    assert_equal(obj.id, g.get_clone)
+    assert_nil(g.get_group)
+
+    m = g.members[0]
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal('dummy6', m.id)
+    assert_nil(g.get_master)
+    assert_equal(obj.id, m.get_clone)
+    assert_equal(g.id, m.get_group)
+
+    m = g.members[1]
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal('dummy5', m.id)
+    assert_nil(g.get_master)
+    assert_equal(obj.id, m.get_clone)
+    assert_equal(g.id, m.get_group)
+  end
+
+  def test_init_group
+    obj = ClusterEntity::Clone.new(@cib.elements["//clone[@id='group2-clone']"])
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'group2-clone-meta_attributes-a',
+      'a',
+      '1'
+    ) << ClusterEntity::NvPair.new(
+      'group2-clone-meta_attributes-d',
+      'd',
+      '1'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert_nil(obj.parent)
+    assert_nil(obj.get_master)
+    assert_nil(obj.get_clone)
+    assert_nil(obj.get_group)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_not_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert_equal(false, obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+
+    g = obj.member
+    assert_instance_of(ClusterEntity::Group, g)
+    assert_equal(obj, g.parent)
+    assert_equal('group2', g.id)
+    assert_equal(2, g.members.length)
+    assert_nil(g.get_master)
+    assert_equal(obj.id, g.get_clone)
+    assert_nil(g.get_group)
+
+    m = g.members[0]
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal('dummy6', m.id)
+    assert_nil(g.get_master)
+    assert_equal(obj.id, m.get_clone)
+    assert_equal(g.id, m.get_group)
+
+    m = g.members[1]
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal('dummy5', m.id)
+    assert_nil(g.get_master)
+    assert_equal(obj.id, m.get_clone)
+    assert_equal(g.id, m.get_group)
+  end
+
+  def test_to_status_primitive_version1
+    obj = ClusterEntity::Clone.new(
+      @cib.elements["//clone[@id='dummy-clone']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json = '[{
+      "id": "dummy",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node3"
+      ],
+      "group": null,
+      "clone": true,
+      "clone_id": "dummy-clone",
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "aaa",
+          "value": "222",
+          "id": "dummy-clone-meta_attributes-aaa",
+          "parent": "dummy-clone"
+        },
+        {
+          "key": "ccc",
+          "value": "222",
+          "id": "dummy-clone-meta_attributes-ccc",
+          "parent": "dummy-clone"
+        },
+        {
+          "key": "bbb",
+          "value": "111",
+          "id": "dummy-meta_attributes-bbb",
+          "parent": "dummy"
+        }
+      ]
+    },
+    {
+      "id": "dummy",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node1"
+      ],
+      "group": null,
+      "clone": true,
+      "clone_id": "dummy-clone",
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "aaa",
+          "value": "222",
+          "id": "dummy-clone-meta_attributes-aaa",
+          "parent": "dummy-clone"
+        },
+        {
+          "key": "ccc",
+          "value": "222",
+          "id": "dummy-clone-meta_attributes-ccc",
+          "parent": "dummy-clone"
+        },
+        {
+          "key": "bbb",
+          "value": "111",
+          "id": "dummy-meta_attributes-bbb",
+          "parent": "dummy"
+        }
+      ]
+    },
+    {
+      "id": "dummy",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node2"
+      ],
+      "group": null,
+      "clone": true,
+      "clone_id": "dummy-clone",
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "aaa",
+          "value": "222",
+          "id": "dummy-clone-meta_attributes-aaa",
+          "parent": "dummy-clone"
+        },
+        {
+          "key": "ccc",
+          "value": "222",
+          "id": "dummy-clone-meta_attributes-ccc",
+          "parent": "dummy-clone"
+        },
+        {
+          "key": "bbb",
+          "value": "111",
+          "id": "dummy-meta_attributes-bbb",
+          "parent": "dummy"
+        }
+      ]
+    }]'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    assert(hash == obj.to_status)
+  end
+
+  def test_to_status_group_version1
+    obj = ClusterEntity::Clone.new(
+      @cib.elements["//clone[@id='group2-clone']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json = '[{
+      "id": "dummy6",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node3"
+      ],
+      "group": "group2-clone/group2",
+      "clone": true,
+      "clone_id": "group2",
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "2",
+          "id": "group2-meta_attributes-a",
+          "parent": "group2"
+        },
+        {
+          "key": "c",
+          "value": "2",
+          "id": "group2-meta_attributes-c",
+          "parent": "group2"
+        },
+        {
+          "key": "d",
+          "value": "2",
+          "id": "group2-meta_attributes-d",
+          "parent": "group2"
+        },
+        {
+          "key": "b",
+          "value": "6",
+          "id": "dummy6-meta_attributes-b",
+          "parent": "dummy6"
+        }
+      ]
+    },
+    {
+      "id": "dummy6",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node1"
+      ],
+      "group": "group2-clone/group2",
+      "clone": true,
+      "clone_id": "group2",
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "2",
+          "id": "group2-meta_attributes-a",
+          "parent": "group2"
+        },
+        {
+          "key": "c",
+          "value": "2",
+          "id": "group2-meta_attributes-c",
+          "parent": "group2"
+        },
+        {
+          "key": "d",
+          "value": "2",
+          "id": "group2-meta_attributes-d",
+          "parent": "group2"
+        },
+        {
+          "key": "b",
+          "value": "6",
+          "id": "dummy6-meta_attributes-b",
+          "parent": "dummy6"
+        }
+      ]
+    },
+    {
+      "id": "dummy6",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node2"
+      ],
+      "group": "group2-clone/group2",
+      "clone": true,
+      "clone_id": "group2",
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "2",
+          "id": "group2-meta_attributes-a",
+          "parent": "group2"
+        },
+        {
+          "key": "c",
+          "value": "2",
+          "id": "group2-meta_attributes-c",
+          "parent": "group2"
+        },
+        {
+          "key": "d",
+          "value": "2",
+          "id": "group2-meta_attributes-d",
+          "parent": "group2"
+        },
+        {
+          "key": "b",
+          "value": "6",
+          "id": "dummy6-meta_attributes-b",
+          "parent": "dummy6"
+        }
+      ]
+    },
+    {
+      "id": "dummy5",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node3"
+      ],
+      "group": "group2-clone/group2",
+      "clone": true,
+      "clone_id": "group2",
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "2",
+          "id": "group2-meta_attributes-a",
+          "parent": "group2"
+        },
+        {
+          "key": "c",
+          "value": "2",
+          "id": "group2-meta_attributes-c",
+          "parent": "group2"
+        },
+        {
+          "key": "d",
+          "value": "2",
+          "id": "group2-meta_attributes-d",
+          "parent": "group2"
+        },
+        {
+          "key": "b",
+          "value": "5",
+          "id": "dummy5-meta_attributes-b",
+          "parent": "dummy5"
+        },
+        {
+          "key": "x",
+          "value": "0",
+          "id": "dummy5-meta_attributes-x",
+          "parent": "dummy5"
+        }
+      ]
+    },
+    {
+      "id": "dummy5",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node1"
+      ],
+      "group": "group2-clone/group2",
+      "clone": true,
+      "clone_id": "group2",
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "2",
+          "id": "group2-meta_attributes-a",
+          "parent": "group2"
+        },
+        {
+          "key": "c",
+          "value": "2",
+          "id": "group2-meta_attributes-c",
+          "parent": "group2"
+        },
+        {
+          "key": "d",
+          "value": "2",
+          "id": "group2-meta_attributes-d",
+          "parent": "group2"
+        },
+        {
+          "key": "b",
+          "value": "5",
+          "id": "dummy5-meta_attributes-b",
+          "parent": "dummy5"
+        },
+        {
+          "key": "x",
+          "value": "0",
+          "id": "dummy5-meta_attributes-x",
+          "parent": "dummy5"
+        }
+      ]
+    },
+    {
+      "id": "dummy5",
+      "agentname": "ocf::heartbeat:Dummy",
+      "active": true,
+      "nodes": [
+        "node2"
+      ],
+      "group": "group2-clone/group2",
+      "clone": true,
+      "clone_id": "group2",
+      "ms_id": null,
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": false,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "2",
+          "id": "group2-meta_attributes-a",
+          "parent": "group2"
+        },
+        {
+          "key": "c",
+          "value": "2",
+          "id": "group2-meta_attributes-c",
+          "parent": "group2"
+        },
+        {
+          "key": "d",
+          "value": "2",
+          "id": "group2-meta_attributes-d",
+          "parent": "group2"
+        },
+        {
+          "key": "b",
+          "value": "5",
+          "id": "dummy5-meta_attributes-b",
+          "parent": "dummy5"
+        },
+        {
+          "key": "x",
+          "value": "0",
+          "id": "dummy5-meta_attributes-x",
+          "parent": "dummy5"
+        }
+      ]
+    }]'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    assert(hash == obj.to_status)
+  end
+
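+  # The version 2 status (obj.to_status('2')) nests the member primitive and its
+  # per-node crm_status entries inside a single clone hash, instead of the flat
+  # one-entry-per-node list produced by the version 1 format above.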
+  def test_to_status_primitive_version2
+    obj = ClusterEntity::Clone.new(
+      @cib.elements["//clone[@id='dummy-clone']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json ='{
+      "id": "dummy-clone",
+      "meta_attr": [
+        {
+          "id": "dummy-clone-meta_attributes-aaa",
+          "name": "aaa",
+          "value": "222"
+        },
+        {
+          "id": "dummy-clone-meta_attributes-ccc",
+          "name": "ccc",
+          "value": "222"
+        }
+      ],
+      "error_list": [],
+      "warning_list": [],
+      "class_type": "clone",
+      "parent_id": null,
+      "disabled": false,
+      "status": "running",
+      "member": {
+        "id": "dummy",
+        "meta_attr": [
+          {
+            "id": "dummy-meta_attributes-aaa",
+            "name": "aaa",
+            "value": "111"
+          },
+          {
+            "id": "dummy-meta_attributes-bbb",
+            "name": "bbb",
+            "value": "111"
+          }
+        ],
+        "utilization": [],
+        "error_list": [],
+        "warning_list": [],
+        "class_type": "primitive",
+        "parent_id": "dummy-clone",
+        "disabled": false,
+        "agentname": "ocf::heartbeat:Dummy",
+        "provider": "heartbeat",
+        "type": "Dummy",
+        "stonith": false,
+        "instance_attr": [],
+        "status": "running",
+        "class": "ocf",
+        "crm_status": [
+          {
+            "id": "dummy",
+            "resource_agent": "ocf::heartbeat:Dummy",
+            "managed": true,
+            "failed": false,
+            "role": "Started",
+            "active": true,
+            "orphaned": false,
+            "failure_ignored": false,
+            "nodes_running_on": 1,
+            "pending": null,
+            "node": {
+              "name": "node3",
+              "id": "3",
+              "cached": false
+            }
+          },
+          {
+            "id": "dummy",
+            "resource_agent": "ocf::heartbeat:Dummy",
+            "managed": true,
+            "failed": false,
+            "role": "Started",
+            "active": true,
+            "orphaned": false,
+            "failure_ignored": false,
+            "nodes_running_on": 1,
+            "pending": null,
+            "node": {
+              "name": "node1",
+              "id": "1",
+              "cached": false
+            }
+          },
+          {
+            "id": "dummy",
+            "resource_agent": "ocf::heartbeat:Dummy",
+            "managed": true,
+            "failed": false,
+            "role": "Started",
+            "active": true,
+            "orphaned": false,
+            "failure_ignored": false,
+            "nodes_running_on": 1,
+            "pending": null,
+            "node": {
+              "name": "node2",
+              "id": "2",
+              "cached": false
+            }
+          }
+        ],
+        "operations": []
+      }
+    }'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    # assert_equal_hashes(hash, obj.to_status('2'))
+    assert(hash == obj.to_status('2'))
+  end
+
+  def test_to_status_group_version2
+    obj = ClusterEntity::Clone.new(
+      @cib.elements["//clone[@id='group2-clone']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json = '{
+      "id": "group2-clone",
+      "meta_attr": [
+        {
+          "id": "group2-clone-meta_attributes-a",
+          "name": "a",
+          "value": "1"
+        },
+        {
+          "id": "group2-clone-meta_attributes-d",
+          "name": "d",
+          "value": "1"
+        }
+      ],
+      "error_list": [],
+      "warning_list": [],
+      "class_type": "clone",
+      "parent_id": null,
+      "disabled": false,
+      "status": "running",
+      "member": {
+        "id": "group2",
+        "meta_attr": [
+          {
+            "id": "group2-meta_attributes-a",
+            "name": "a",
+            "value": "2"
+          },
+          {
+            "id": "group2-meta_attributes-c",
+            "name": "c",
+            "value": "2"
+          },
+          {
+            "id": "group2-meta_attributes-d",
+            "name": "d",
+            "value": "2"
+          }
+        ],
+        "error_list": [],
+        "warning_list": [],
+        "class_type": "group",
+        "parent_id": "group2-clone",
+        "disabled": false,
+        "status": "running",
+        "members": [
+          {
+            "id": "dummy6",
+            "meta_attr": [
+              {
+                "id": "dummy6-meta_attributes-a",
+                "name": "a",
+                "value": "6"
+              },
+              {
+                "id": "dummy6-meta_attributes-b",
+                "name": "b",
+                "value": "6"
+              }
+            ],
+            "utilization": [
+              {
+                "id": "dummy6-utilization-util1",
+                "name": "util1",
+                "value": "8"
+              }
+            ],
+            "error_list": [],
+            "warning_list": [],
+            "class_type": "primitive",
+            "parent_id": "group2",
+            "disabled": false,
+            "agentname": "ocf::heartbeat:Dummy",
+            "provider": "heartbeat",
+            "type": "Dummy",
+            "stonith": false,
+            "instance_attr": [],
+            "status": "running",
+            "class": "ocf",
+            "crm_status": [
+              {
+                "id": "dummy6",
+                "resource_agent": "ocf::heartbeat:Dummy",
+                "managed": true,
+                "failed": false,
+                "role": "Started",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node3",
+                  "id": "3",
+                  "cached": false
+                }
+              },
+              {
+                "id": "dummy6",
+                "resource_agent": "ocf::heartbeat:Dummy",
+                "managed": true,
+                "failed": false,
+                "role": "Started",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node1",
+                  "id": "1",
+                  "cached": false
+                }
+              },
+              {
+                "id": "dummy6",
+                "resource_agent": "ocf::heartbeat:Dummy",
+                "managed": true,
+                "failed": false,
+                "role": "Started",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node2",
+                  "id": "2",
+                  "cached": false
+                }
+              }
+            ],
+            "operations": []
+          },
+          {
+            "id": "dummy5",
+            "meta_attr": [
+              {
+                "id": "dummy5-meta_attributes-a",
+                "name": "a",
+                "value": "5"
+              },
+              {
+                "id": "dummy5-meta_attributes-b",
+                "name": "b",
+                "value": "5"
+              },
+              {
+                "id": "dummy5-meta_attributes-x",
+                "name": "x",
+                "value": "0"
+              }
+            ],
+            "utilization": [],
+            "error_list": [],
+            "warning_list": [],
+            "class_type": "primitive",
+            "parent_id": "group2",
+            "disabled": false,
+            "agentname": "ocf::heartbeat:Dummy",
+            "provider": "heartbeat",
+            "type": "Dummy",
+            "stonith": false,
+            "instance_attr": [],
+            "status": "running",
+            "class": "ocf",
+            "crm_status": [
+              {
+                "id": "dummy5",
+                "resource_agent": "ocf::heartbeat:Dummy",
+                "managed": true,
+                "failed": false,
+                "role": "Started",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node3",
+                  "id": "3",
+                  "cached": false
+                }
+              },
+              {
+                "id": "dummy5",
+                "resource_agent": "ocf::heartbeat:Dummy",
+                "managed": true,
+                "failed": false,
+                "role": "Started",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node1",
+                  "id": "1",
+                  "cached": false
+                }
+              },
+              {
+                "id": "dummy5",
+                "resource_agent": "ocf::heartbeat:Dummy",
+                "managed": true,
+                "failed": false,
+                "role": "Started",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node2",
+                  "id": "2",
+                  "cached": false
+                }
+              }
+            ],
+            "operations": []
+          }
+        ]
+      }
+    }'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    # assert_equal_hashes(hash, obj.to_status('2'))
+    assert(hash == obj.to_status('2'))
+  end
+end
+
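+# Tests for ClusterEntity::MasterSlave: construction from CIB <master> elements,
+# both with and without crm_mon status data, and the version 1 and version 2
+# to_status output formats.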
+class TestMasterSlave < Test::Unit::TestCase
+  def setup
+    @cib = REXML::Document.new(File.read(File.join(CURRENT_DIR, CIB_FILE)))
+    @crm_mon = REXML::Document.new(File.read(File.join(CURRENT_DIR, CRM_FILE)))
+  end
+
+  def test_init_invalid_element
+    xml = '<primitive id="dummy"/>'
+    obj = ClusterEntity::MasterSlave.new(REXML::Document.new(xml).elements['*'])
+    assert_nil(obj.id)
+    assert_nil(obj.parent)
+    assert(obj.meta_attr.empty?)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert_equal(false, obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+  end
+
+  def test_init_empty_element
+    xml = '<master id="dummy-clone"/>'
+    obj = ClusterEntity::MasterSlave.new(REXML::Document.new(xml).elements['*'])
+    assert_equal('dummy-clone', obj.id)
+    assert_nil(obj.parent)
+    assert(obj.meta_attr.empty?)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert_equal(false, obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+  end
+
+  def test_init_primitive_with_crm
+    obj = ClusterEntity::MasterSlave.new(
+      @cib.elements["//master[@id='ms-master']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    assert_equal('ms-master', obj.id)
+    assert_nil(obj.parent)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'ms-master-meta_attributes-a',
+      'a',
+      '1'
+    ) << ClusterEntity::NvPair.new(
+      'ms-master-meta_attributes-b',
+      'b',
+      '1'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_not_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert(obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+
+    assert_equal(1, obj.masters.length)
+    assert_equal('node3', obj.masters[0])
+
+    assert_equal(2, obj.slaves.length)
+    assert(obj.slaves.include?('node1'))
+    assert(obj.slaves.include?('node2'))
+
+    m = obj.member
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal(obj, m.parent)
+    assert_equal('ms', m.id)
+    assert_equal(3, m.crm_status.length)
+  end
+
+  def test_init_primitive
+    obj = ClusterEntity::MasterSlave.new(@cib.elements["//master[@id='ms-master']"])
+    assert_equal('ms-master', obj.id)
+    assert_nil(obj.parent)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'ms-master-meta_attributes-a',
+      'a',
+      '1'
+    ) << ClusterEntity::NvPair.new(
+      'ms-master-meta_attributes-b',
+      'b',
+      '1'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert(obj.error_list.empty?)
+    assert_equal(1, obj.warning_list.length)
+    assert_not_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert_equal(false, obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+
+    assert(obj.masters.empty?)
+    assert(obj.slaves.empty?)
+
+    m = obj.member
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal(obj, m.parent)
+    assert_equal('ms', m.id)
+    assert(m.crm_status.empty?)
+  end
+
+  def test_init_group_with_crm
+    obj = ClusterEntity::MasterSlave.new(
+      @cib.elements["//master[@id='group3-master']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    assert_equal('group3-master', obj.id)
+    assert_nil(obj.parent)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'group3-master-meta_attributes-a',
+      'a',
+      '0'
+    ) << ClusterEntity::NvPair.new(
+      'group3-master-meta_attributes-c',
+      'c',
+      '0'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert(obj.error_list.empty?)
+    assert(obj.warning_list.empty?)
+    assert_not_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert(obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+
+    assert_equal(2, obj.masters.length)
+    assert_equal('node3', obj.masters[0])
+    assert_equal('node3', obj.masters[1])
+
+    assert_equal(4, obj.slaves.length)
+    assert(['node1', 'node2'].to_set == obj.slaves.to_set)
+
+    g = obj.member
+    assert_instance_of(ClusterEntity::Group, g)
+    assert_equal(obj, g.parent)
+    assert_equal('group3', g.id)
+    assert_equal(2, g.members.length)
+
+    m = g.members[0]
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal('ms1', m.id)
+    assert_equal(3, m.crm_status.length)
+
+    m = g.members[1]
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal('ms2', m.id)
+    assert_equal(3, m.crm_status.length)
+  end
+
+  def test_init_group
+    obj = ClusterEntity::MasterSlave.new(@cib.elements["//master[@id='group3-master']"])
+    assert_equal('group3-master', obj.id)
+    assert_nil(obj.parent)
+    meta_attr = ClusterEntity::NvSet.new << ClusterEntity::NvPair.new(
+      'group3-master-meta_attributes-a',
+      'a',
+      '0'
+    ) << ClusterEntity::NvPair.new(
+      'group3-master-meta_attributes-c',
+      'c',
+      '0'
+    )
+    assert_equal_NvSet(meta_attr, obj.meta_attr)
+    assert(obj.error_list.empty?)
+    assert_equal(1, obj.warning_list.length)
+    assert_not_nil(obj.member)
+    assert_equal(false, obj.unique)
+    assert_equal(false, obj.managed)
+    assert_equal(false, obj.failed)
+    assert_equal(false, obj.failure_ignored)
+
+    assert(obj.masters.empty?)
+    assert(obj.slaves.empty?)
+
+    g = obj.member
+    assert_instance_of(ClusterEntity::Group, g)
+    assert_equal(obj, g.parent)
+    assert_equal('group3', g.id)
+    assert_equal(2, g.members.length)
+
+    m = g.members[0]
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal('ms1', m.id)
+    assert(m.crm_status.empty?)
+
+    m = g.members[1]
+    assert_instance_of(ClusterEntity::Primitive, m)
+    assert_equal('ms2', m.id)
+    assert(m.crm_status.empty?)
+  end
+
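+  # Version 1 status output: one flat entry per node running an instance of the
+  # master/slave resource, with meta attributes defined on the master element
+  # merged with those defined on the primitive itself.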
+  def test_to_status_primitive_version1
+    obj = ClusterEntity::MasterSlave.new(
+      @cib.elements["//master[@id='ms-master']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json ='[{
+      "id": "ms",
+      "agentname": "ocf::pacemaker:Stateful",
+      "active": true,
+      "nodes": [
+        "node3"
+      ],
+      "group": null,
+      "clone": false,
+      "clone_id": null,
+      "ms_id": "ms-master",
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": true,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "1",
+          "id": "ms-master-meta_attributes-a",
+          "parent": "ms-master"
+        },
+        {
+          "key": "b",
+          "value": "1",
+          "id": "ms-master-meta_attributes-b",
+          "parent": "ms-master"
+        },
+        {
+          "key": "c",
+          "value": "0",
+          "id": "ms-meta_attributes-c",
+          "parent": "ms"
+        }
+      ]
+    },
+    {
+      "id": "ms",
+      "agentname": "ocf::pacemaker:Stateful",
+      "active": true,
+      "nodes": [
+        "node1"
+      ],
+      "group": null,
+      "clone": false,
+      "clone_id": null,
+      "ms_id": "ms-master",
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": true,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "1",
+          "id": "ms-master-meta_attributes-a",
+          "parent": "ms-master"
+        },
+        {
+          "key": "b",
+          "value": "1",
+          "id": "ms-master-meta_attributes-b",
+          "parent": "ms-master"
+        },
+        {
+          "key": "c",
+          "value": "0",
+          "id": "ms-meta_attributes-c",
+          "parent": "ms"
+        }
+      ]
+    },
+    {
+      "id": "ms",
+      "agentname": "ocf::pacemaker:Stateful",
+      "active": true,
+      "nodes": [
+        "node2"
+      ],
+      "group": null,
+      "clone": false,
+      "clone_id": null,
+      "ms_id": "ms-master",
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": true,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "1",
+          "id": "ms-master-meta_attributes-a",
+          "parent": "ms-master"
+        },
+        {
+          "key": "b",
+          "value": "1",
+          "id": "ms-master-meta_attributes-b",
+          "parent": "ms-master"
+        },
+        {
+          "key": "c",
+          "value": "0",
+          "id": "ms-meta_attributes-c",
+          "parent": "ms"
+        }
+      ]
+    }]'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    # assert_equal_status(hash, obj.to_status)
+    assert(hash == obj.to_status)
+  end
+
+  def test_to_status_group_version1
+    obj = ClusterEntity::MasterSlave.new(
+      @cib.elements["//master[@id='group3-master']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json ='[{
+      "id": "ms1",
+      "agentname": "ocf::pacemaker:Stateful",
+      "active": true,
+      "nodes": [
+        "node3"
+      ],
+      "group": "group3-master/group3",
+      "clone": false,
+      "clone_id": null,
+      "ms_id": "group3",
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": true,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "3",
+          "id": "group3-meta_attributes-a",
+          "parent": "group3"
+        },
+        {
+          "key": "b",
+          "value": "3",
+          "id": "group3-meta_attributes-b",
+          "parent": "group3"
+        },
+        {
+          "key": "c",
+          "value": "3",
+          "id": "group3-meta_attributes-c",
+          "parent": "group3"
+        },
+        {
+          "key": "d",
+          "value": "1",
+          "id": "ms1-meta_attributes-d",
+          "parent": "ms1"
+        }
+      ]
+    },
+    {
+      "id": "ms1",
+      "agentname": "ocf::pacemaker:Stateful",
+      "active": true,
+      "nodes": [
+        "node1"
+      ],
+      "group": "group3-master/group3",
+      "clone": false,
+      "clone_id": null,
+      "ms_id": "group3",
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": true,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "3",
+          "id": "group3-meta_attributes-a",
+          "parent": "group3"
+        },
+        {
+          "key": "b",
+          "value": "3",
+          "id": "group3-meta_attributes-b",
+          "parent": "group3"
+        },
+        {
+          "key": "c",
+          "value": "3",
+          "id": "group3-meta_attributes-c",
+          "parent": "group3"
+        },
+        {
+          "key": "d",
+          "value": "1",
+          "id": "ms1-meta_attributes-d",
+          "parent": "ms1"
+        }
+      ]
+    },
+    {
+      "id": "ms1",
+      "agentname": "ocf::pacemaker:Stateful",
+      "active": true,
+      "nodes": [
+        "node2"
+      ],
+      "group": "group3-master/group3",
+      "clone": false,
+      "clone_id": null,
+      "ms_id": "group3",
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": true,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "3",
+          "id": "group3-meta_attributes-a",
+          "parent": "group3"
+        },
+        {
+          "key": "b",
+          "value": "3",
+          "id": "group3-meta_attributes-b",
+          "parent": "group3"
+        },
+        {
+          "key": "c",
+          "value": "3",
+          "id": "group3-meta_attributes-c",
+          "parent": "group3"
+        },
+        {
+          "key": "d",
+          "value": "1",
+          "id": "ms1-meta_attributes-d",
+          "parent": "ms1"
+        }
+      ]
+    },
+    {
+      "id": "ms2",
+      "agentname": "ocf::pacemaker:Stateful",
+      "active": true,
+      "nodes": [
+        "node3"
+      ],
+      "group": "group3-master/group3",
+      "clone": false,
+      "clone_id": null,
+      "ms_id": "group3",
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": true,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "3",
+          "id": "group3-meta_attributes-a",
+          "parent": "group3"
+        },
+        {
+          "key": "b",
+          "value": "3",
+          "id": "group3-meta_attributes-b",
+          "parent": "group3"
+        },
+        {
+          "key": "c",
+          "value": "3",
+          "id": "group3-meta_attributes-c",
+          "parent": "group3"
+        },
+        {
+          "key": "d",
+          "value": "2",
+          "id": "ms2-meta_attributes-d",
+          "parent": "ms2"
+        }
+      ]
+    },
+    {
+      "id": "ms2",
+      "agentname": "ocf::pacemaker:Stateful",
+      "active": true,
+      "nodes": [
+        "node1"
+      ],
+      "group": "group3-master/group3",
+      "clone": false,
+      "clone_id": null,
+      "ms_id": "group3",
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": true,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "3",
+          "id": "group3-meta_attributes-a",
+          "parent": "group3"
+        },
+        {
+          "key": "b",
+          "value": "3",
+          "id": "group3-meta_attributes-b",
+          "parent": "group3"
+        },
+        {
+          "key": "c",
+          "value": "3",
+          "id": "group3-meta_attributes-c",
+          "parent": "group3"
+        },
+        {
+          "key": "d",
+          "value": "2",
+          "id": "ms2-meta_attributes-d",
+          "parent": "ms2"
+        }
+      ]
+    },
+    {
+      "id": "ms2",
+      "agentname": "ocf::pacemaker:Stateful",
+      "active": true,
+      "nodes": [
+        "node2"
+      ],
+      "group": "group3-master/group3",
+      "clone": false,
+      "clone_id": null,
+      "ms_id": "group3",
+      "failed": false,
+      "orphaned": false,
+      "options": {},
+      "stonith": false,
+      "ms": true,
+      "disabled": false,
+      "operations": [],
+      "instance_attr": {},
+      "meta_attr": [
+        {
+          "key": "a",
+          "value": "3",
+          "id": "group3-meta_attributes-a",
+          "parent": "group3"
+        },
+        {
+          "key": "b",
+          "value": "3",
+          "id": "group3-meta_attributes-b",
+          "parent": "group3"
+        },
+        {
+          "key": "c",
+          "value": "3",
+          "id": "group3-meta_attributes-c",
+          "parent": "group3"
+        },
+        {
+          "key": "d",
+          "value": "2",
+          "id": "ms2-meta_attributes-d",
+          "parent": "ms2"
+        }
+      ]
+    }]'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    assert(hash == obj.to_status)
+  end
+
+  def test_to_status_primitive_version2
+    obj = ClusterEntity::MasterSlave.new(
+      @cib.elements["//master[@id='ms-master']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json ='{
+      "id": "ms-master",
+      "meta_attr": [
+        {
+          "id": "ms-master-meta_attributes-a",
+          "name": "a",
+          "value": "1"
+        },
+        {
+          "id": "ms-master-meta_attributes-b",
+          "name": "b",
+          "value": "1"
+        }
+      ],
+      "error_list": [],
+      "warning_list": [],
+      "class_type": "master",
+      "parent_id": null,
+      "disabled": false,
+      "status": "running",
+      "member": {
+        "id": "ms",
+        "meta_attr": [
+          {
+            "id": "ms-meta_attributes-a",
+            "name": "a",
+            "value": "0"
+          },
+          {
+            "id": "ms-meta_attributes-c",
+            "name": "c",
+            "value": "0"
+          }
+        ],
+        "utilization": [],
+        "error_list": [],
+        "warning_list": [],
+        "class_type": "primitive",
+        "parent_id": "ms-master",
+        "disabled": false,
+        "agentname": "ocf::pacemaker:Stateful",
+        "provider": "pacemaker",
+        "type": "Stateful",
+        "stonith": false,
+        "instance_attr": [],
+        "status": "running",
+        "class": "ocf",
+        "crm_status": [
+          {
+            "id": "ms",
+            "resource_agent": "ocf::pacemaker:Stateful",
+            "managed": true,
+            "failed": false,
+            "role": "Master",
+            "active": true,
+            "orphaned": false,
+            "failure_ignored": false,
+            "nodes_running_on": 1,
+            "pending": null,
+            "node": {
+              "name": "node3",
+              "id": "3",
+              "cached": false
+            }
+          },
+          {
+            "id": "ms",
+            "resource_agent": "ocf::pacemaker:Stateful",
+            "managed": true,
+            "failed": false,
+            "role": "Slave",
+            "active": true,
+            "orphaned": false,
+            "failure_ignored": false,
+            "nodes_running_on": 1,
+            "pending": null,
+            "node": {
+              "name": "node1",
+              "id": "1",
+              "cached": false
+            }
+          },
+          {
+            "id": "ms",
+            "resource_agent": "ocf::pacemaker:Stateful",
+            "managed": true,
+            "failed": false,
+            "role": "Slave",
+            "active": true,
+            "orphaned": false,
+            "failure_ignored": false,
+            "nodes_running_on": 1,
+            "pending": null,
+            "node": {
+              "name": "node2",
+              "id": "2",
+              "cached": false
+            }
+          }
+        ],
+        "operations": []
+      }
+    }'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    assert(hash == obj.to_status('2'))
+  end
+
+  def test_to_status_group_version2
+    obj = ClusterEntity::MasterSlave.new(
+      @cib.elements["//master[@id='group3-master']"],
+      @crm_mon,
+      ClusterEntity::get_rsc_status(@crm_mon)
+    )
+    json = '{
+      "id": "group3-master",
+      "meta_attr": [
+        {
+          "id": "group3-master-meta_attributes-a",
+          "name": "a",
+          "value": "0"
+        },
+        {
+          "id": "group3-master-meta_attributes-c",
+          "name": "c",
+          "value": "0"
+        }
+      ],
+      "error_list": [],
+      "warning_list": [],
+      "class_type": "master",
+      "parent_id": null,
+      "disabled": false,
+      "status": "running",
+      "member": {
+        "id": "group3",
+        "meta_attr": [
+          {
+            "id": "group3-meta_attributes-a",
+            "name": "a",
+            "value": "3"
+          },
+          {
+            "id": "group3-meta_attributes-b",
+            "name": "b",
+            "value": "3"
+          },
+          {
+            "id": "group3-meta_attributes-c",
+            "name": "c",
+            "value": "3"
+          }
+        ],
+        "error_list": [],
+        "warning_list": [],
+        "class_type": "group",
+        "parent_id": "group3-master",
+        "disabled": false,
+        "status": "running",
+        "members": [
+          {
+            "id": "ms1",
+            "meta_attr": [
+              {
+                "id": "ms1-meta_attributes-a",
+                "name": "a",
+                "value": "1"
+              },
+              {
+                "id": "ms1-meta_attributes-b",
+                "name": "b",
+                "value": "1"
+              },
+              {
+                "id": "ms1-meta_attributes-d",
+                "name": "d",
+                "value": "1"
+              }
+            ],
+            "utilization": [],
+            "error_list": [],
+            "warning_list": [],
+            "class_type": "primitive",
+            "parent_id": "group3",
+            "disabled": false,
+            "agentname": "ocf::pacemaker:Stateful",
+            "provider": "pacemaker",
+            "type": "Stateful",
+            "stonith": false,
+            "instance_attr": [],
+            "status": "running",
+            "class": "ocf",
+            "crm_status": [
+              {
+                "id": "ms1",
+                "resource_agent": "ocf::pacemaker:Stateful",
+                "managed": true,
+                "failed": false,
+                "role": "Master",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node3",
+                  "id": "3",
+                  "cached": false
+                }
+              },
+              {
+                "id": "ms1",
+                "resource_agent": "ocf::pacemaker:Stateful",
+                "managed": true,
+                "failed": false,
+                "role": "Slave",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node1",
+                  "id": "1",
+                  "cached": false
+                }
+              },
+              {
+                "id": "ms1",
+                "resource_agent": "ocf::pacemaker:Stateful",
+                "managed": true,
+                "failed": false,
+                "role": "Slave",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node2",
+                  "id": "2",
+                  "cached": false
+                }
+              }
+            ],
+            "operations": []
+          },
+          {
+            "id": "ms2",
+            "meta_attr": [
+              {
+                "id": "ms2-meta_attributes-a",
+                "name": "a",
+                "value": "2"
+              },
+              {
+                "id": "ms2-meta_attributes-b",
+                "name": "b",
+                "value": "2"
+              },
+              {
+                "id": "ms2-meta_attributes-d",
+                "name": "d",
+                "value": "2"
+              }
+            ],
+            "utilization": [],
+            "error_list": [],
+            "warning_list": [],
+            "class_type": "primitive",
+            "parent_id": "group3",
+            "disabled": false,
+            "agentname": "ocf::pacemaker:Stateful",
+            "provider": "pacemaker",
+            "type": "Stateful",
+            "stonith": false,
+            "instance_attr": [],
+            "status": "running",
+            "class": "ocf",
+            "crm_status": [
+              {
+                "id": "ms2",
+                "resource_agent": "ocf::pacemaker:Stateful",
+                "managed": true,
+                "failed": false,
+                "role": "Master",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node3",
+                  "id": "3",
+                  "cached": false
+                }
+              },
+              {
+                "id": "ms2",
+                "resource_agent": "ocf::pacemaker:Stateful",
+                "managed": true,
+                "failed": false,
+                "role": "Slave",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node1",
+                  "id": "1",
+                  "cached": false
+                }
+              },
+              {
+                "id": "ms2",
+                "resource_agent": "ocf::pacemaker:Stateful",
+                "managed": true,
+                "failed": false,
+                "role": "Slave",
+                "active": true,
+                "orphaned": false,
+                "failure_ignored": false,
+                "nodes_running_on": 1,
+                "pending": null,
+                "node": {
+                  "name": "node2",
+                  "id": "2",
+                  "cached": false
+                }
+              }
+            ],
+            "operations": []
+          }
+        ]
+      }
+    }'
+    hash = JSON.parse(json, {:symbolize_names => true})
+    assert(hash == obj.to_status('2'))
+  end
+end
diff --git a/pcsd/test/test_config.rb b/pcsd/test/test_config.rb
new file mode 100644
index 0000000..6405a05
--- /dev/null
+++ b/pcsd/test/test_config.rb
@@ -0,0 +1,711 @@
+require 'test/unit'
+require 'fileutils'
+
+require 'pcsd_test_utils.rb'
+require 'config.rb'
+require 'permissions.rb'
+
+class TestConfig < Test::Unit::TestCase
+  def setup
+    $logger = MockLogger.new
+    FileUtils.cp(File.join(CURRENT_DIR, 'pcs_settings.conf'), CFG_PCSD_SETTINGS)
+  end
+
+  def test_parse_empty()
+    text = ''
+    cfg = PCSConfig.new(text)
+    assert_equal(0, cfg.clusters.length)
+    assert_equal(
+      [[
+        "error",
+        "Unable to parse pcs_settings file: A JSON text must at least contain two octets!"
+      ]],
+      $logger.log
+    )
+    assert_equal(
+'{
+  "format_version": 2,
+  "data_version": 0,
+  "clusters": [
+
+  ],
+  "permissions": {
+    "local_cluster": [
+
+    ]
+  }
+}',
+      cfg.text
+    )
+  end
+
+  def test_parse_format1()
+    text = '[]'
+    cfg = PCSConfig.new(text)
+    assert_equal(0, cfg.clusters.length)
+    assert_equal(
+'{
+  "format_version": 2,
+  "data_version": 0,
+  "clusters": [
+
+  ],
+  "permissions": {
+    "local_cluster": [
+      {
+        "type": "group",
+        "name": "haclient",
+        "allow": [
+          "grant",
+          "read",
+          "write"
+        ]
+      }
+    ]
+  }
+}',
+      cfg.text
+    )
+
+    text = '
+[
+  {
+    "name": "cluster71",
+    "nodes": [
+      "rh71-node1",
+      "rh71-node2"
+    ]
+  }
+]
+'
+    cfg = PCSConfig.new(text)
+    assert_equal(1, cfg.clusters.length)
+    assert_equal("cluster71", cfg.clusters[0].name)
+    assert_equal(["rh71-node1", "rh71-node2"], cfg.clusters[0].nodes)
+    assert_equal(
+'{
+  "format_version": 2,
+  "data_version": 0,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1",
+        "rh71-node2"
+      ]
+    }
+  ],
+  "permissions": {
+    "local_cluster": [
+      {
+        "type": "group",
+        "name": "haclient",
+        "allow": [
+          "grant",
+          "read",
+          "write"
+        ]
+      }
+    ]
+  }
+}',
+      cfg.text
+    )
+  end
+
+  def test_parse_format2()
+    text = '
+{
+  "format_version": 2
+}
+'
+    cfg = PCSConfig.new(text)
+    assert_equal(2, cfg.format_version)
+    assert_equal(0, cfg.data_version)
+    assert_equal(0, cfg.clusters.length)
+    assert_equal(
+'{
+  "format_version": 2,
+  "data_version": 0,
+  "clusters": [
+
+  ],
+  "permissions": {
+    "local_cluster": [
+
+    ]
+  }
+}',
+      cfg.text
+    )
+
+    text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1",
+        "rh71-node2"
+      ]
+    }
+  ],
+  "permissions": {
+    "local_cluster": [
+
+    ]
+  }
+}'
+    cfg = PCSConfig.new(text)
+    assert_equal(2, cfg.format_version)
+    assert_equal(9, cfg.data_version)
+    assert_equal(1, cfg.clusters.length)
+    assert_equal("cluster71", cfg.clusters[0].name)
+    assert_equal(["rh71-node1", "rh71-node2"], cfg.clusters[0].nodes)
+    assert_equal(text, cfg.text)
+
+    text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node2",
+        "rh71-node1",
+        "rh71-node3",
+        "rh71-node2"
+      ]
+    },
+    {
+      "name": "abcd",
+      "nodes": [
+        "abcd-node2",
+        "abcd-node1",
+        "abcd-node3",
+        "abcd-node2"
+      ]
+    }
+  ],
+  "permissions": {
+    "local_cluster": [
+
+    ]
+  }
+}'
+    cfg = PCSConfig.new(text)
+    assert_equal(2, cfg.format_version)
+    assert_equal(9, cfg.data_version)
+    assert_equal(2, cfg.clusters.length)
+    assert_equal("cluster71", cfg.clusters[0].name)
+    assert_equal(
+      ["rh71-node1", "rh71-node2", "rh71-node3"],
+      cfg.clusters[0].nodes
+    )
+    out_text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1",
+        "rh71-node2",
+        "rh71-node3"
+      ]
+    },
+    {
+      "name": "abcd",
+      "nodes": [
+        "abcd-node1",
+        "abcd-node2",
+        "abcd-node3"
+      ]
+    }
+  ],
+  "permissions": {
+    "local_cluster": [
+
+    ]
+  }
+}'
+    assert_equal(out_text, cfg.text)
+  end
+
+  def test_parse_format2_bad_cluster()
+    text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node2",
+        "rh71-node1",
+          [
+            "xxx",
+            "yyy"
+          ],
+        "rh71-node2"
+      ]
+    }
+  ]
+}'
+    cfg = PCSConfig.new(text)
+    assert_equal(2, cfg.format_version)
+    assert_equal(9, cfg.data_version)
+    assert_equal(1, cfg.clusters.length)
+    assert_equal("cluster71", cfg.clusters[0].name)
+    assert_equal(["rh71-node1", "rh71-node2"], cfg.clusters[0].nodes)
+    assert_equal(
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1",
+        "rh71-node2"
+      ]
+    }
+  ],
+  "permissions": {
+    "local_cluster": [
+
+    ]
+  }
+}',
+      cfg.text
+    )
+  end
+
+  def test_parse_format2_permissions()
+    text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1",
+        "rh71-node2"
+      ]
+    }
+  ],
+  "permissions": {
+    "local_cluster": [
+      {
+        "type": "group",
+        "name": "group2",
+        "allow": [
+          "read"
+        ]
+      },
+      {
+        "type": "user",
+        "name": "user2",
+        "allow": [
+
+        ]
+      },
+      {
+        "type": "group",
+        "name": "group2",
+        "allow": [
+          "grant"
+        ]
+      },
+      {
+        "type": "group",
+        "name": "group1",
+        "allow": [
+          "write", "full", "write"
+        ]
+      },
+      {
+        "type": "user",
+        "name": "user1",
+        "allow": [
+          "grant", "write", "grant", "read"
+        ]
+      }
+    ]
+  }
+}'
+    out_text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1",
+        "rh71-node2"
+      ]
+    }
+  ],
+  "permissions": {
+    "local_cluster": [
+      {
+        "type": "group",
+        "name": "group1",
+        "allow": [
+          "full",
+          "write"
+        ]
+      },
+      {
+        "type": "group",
+        "name": "group2",
+        "allow": [
+          "grant",
+          "read"
+        ]
+      },
+      {
+        "type": "user",
+        "name": "user1",
+        "allow": [
+          "grant",
+          "read",
+          "write"
+        ]
+      },
+      {
+        "type": "user",
+        "name": "user2",
+        "allow": [
+
+        ]
+      }
+    ]
+  }
+}'
+    cfg = PCSConfig.new(text)
+    assert_equal(out_text, cfg.text)
+
+    perms = cfg.permissions_local
+    assert_equal(false, perms.allows?('user1', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('user1', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user1', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user1', [], Permissions::READ))
+
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::FULL))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user2', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user2', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user2', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user2', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user2', ['group2'], Permissions::FULL))
+    assert_equal(true, perms.allows?('user2', ['group2'], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user2', ['group2'], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user2', ['group2'], Permissions::READ))
+  end
+
+  def test_in_use()
+    cfg = PCSConfig.new(File.open(CFG_PCSD_SETTINGS).read)
+
+    assert(cfg.is_cluster_name_in_use('cluster71'))
+    assert(cfg.is_cluster_name_in_use('cluster67'))
+    assert(! cfg.is_cluster_name_in_use('nonexistent'))
+
+    assert(cfg.is_node_in_use('rh71-node1'))
+    assert(cfg.is_node_in_use('rh67-node3'))
+    assert(! cfg.is_node_in_use('rh71-node3'))
+
+    assert_equal(
+      ["rh71-node1", "rh71-node2"],
+      cfg.get_nodes('cluster71')
+    )
+    assert_equal(
+      ["rh67-node1", "rh67-node2", "rh67-node3"],
+      cfg.get_nodes('cluster67')
+    )
+    assert_equal(
+      nil,
+      cfg.get_nodes('nonexistent')
+    )
+  end
+
+  def test_update_cluster()
+    cfg = PCSConfig.new(File.open(CFG_PCSD_SETTINGS).read)
+    assert_equal(
+      ["rh71-node1", "rh71-node2"],
+      cfg.get_nodes('cluster71')
+    )
+    assert_equal(
+      ["rh67-node1", "rh67-node2", "rh67-node3"],
+      cfg.get_nodes('cluster67')
+    )
+
+    cfg.update_cluster('cluster71', ["rh71-node1", "rh71-node2", "rh71-node3"])
+    assert_equal(
+      ["rh71-node1", "rh71-node2", "rh71-node3"],
+      cfg.get_nodes('cluster71')
+    )
+    assert_equal(
+      ["rh67-node1", "rh67-node2", "rh67-node3"],
+      cfg.get_nodes('cluster67')
+    )
+
+    cfg.update_cluster('cluster71', ["rh71-node1", "rh71-node2"])
+    assert_equal(
+      ["rh71-node1", "rh71-node2"],
+      cfg.get_nodes('cluster71')
+    )
+    assert_equal(
+      ["rh67-node1", "rh67-node2", "rh67-node3"],
+      cfg.get_nodes('cluster67')
+    )
+
+    cfg.update_cluster('cluster71', [])
+    assert(! cfg.is_cluster_name_in_use('cluster71'))
+    assert_equal(
+      ["rh67-node1", "rh67-node2", "rh67-node3"],
+      cfg.get_nodes('cluster67')
+    )
+
+    cfg.update_cluster(
+      'cluster67',
+      ['rh67-node3', [], 'rh67-node1', 'rh67-node2', ['xx'], 'rh67-node1']
+    )
+    assert_equal(
+      ["rh67-node1", "rh67-node2", "rh67-node3"],
+      cfg.get_nodes('cluster67')
+    )
+  end
+
+  def test_remove_cluster()
+    cfg = PCSConfig.new(File.open(CFG_PCSD_SETTINGS).read)
+    assert_equal(
+      ["rh71-node1", "rh71-node2"],
+      cfg.get_nodes('cluster71')
+    )
+    assert_equal(
+      ["rh67-node1", "rh67-node2", "rh67-node3"],
+      cfg.get_nodes('cluster67')
+    )
+
+    cfg.remove_cluster('nonexistent')
+    assert_equal(
+      ["rh71-node1", "rh71-node2"],
+      cfg.get_nodes('cluster71')
+    )
+    assert_equal(
+      ["rh67-node1", "rh67-node2", "rh67-node3"],
+      cfg.get_nodes('cluster67')
+    )
+
+    cfg.remove_cluster('cluster71')
+    assert(! cfg.is_cluster_name_in_use('cluster71'))
+    assert_equal(
+      ["rh67-node1", "rh67-node2", "rh67-node3"],
+      cfg.get_nodes('cluster67')
+    )
+  end
+
+  def test_cluster_nodes_equal?()
+    text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "clusters": [
+    {
+      "name": "cluster71",
+      "nodes": [
+        "rh71-node1",
+        "rh71-node2"
+      ]
+    }
+  ],
+  "permissions": {
+    "local_cluster": [
+
+    ]
+  }
+}'
+    cfg = PCSConfig.new(text)
+
+    assert_equal(
+      true,
+      cfg.cluster_nodes_equal?('cluster71', ['rh71-node1', 'rh71-node2'])
+    )
+    assert_equal(
+      true,
+      cfg.cluster_nodes_equal?('cluster71', ['rh71-node1', 'rh71-node2', 'rh71-node1'])
+    )
+    assert_equal(
+      true,
+      cfg.cluster_nodes_equal?('cluster71', ['rh71-node2', 'rh71-node1'])
+    )
+    assert_equal(
+      false,
+      cfg.cluster_nodes_equal?('cluster71', [])
+    )
+    assert_equal(
+      false,
+      cfg.cluster_nodes_equal?('cluster71', ['rh71-node1'])
+    )
+    assert_equal(
+      false,
+      cfg.cluster_nodes_equal?('cluster71', ['rh71-node3', 'rh71-node1'])
+    )
+    assert_equal(
+      false,
+      cfg.cluster_nodes_equal?('cluster71', ['rh71-node1', 'rh71-node2', 'rh71-node3'])
+    )
+
+    assert_equal(
+      false,
+      cfg.cluster_nodes_equal?('abcd', ['rh71-node3', 'rh71-node1'])
+    )
+    assert_equal(
+      true,
+      cfg.cluster_nodes_equal?('abcd', [])
+    )
+  end
+end
+
+
+class TestTokens < Test::Unit::TestCase
+  def setup
+    $logger = MockLogger.new
+    FileUtils.cp(File.join(CURRENT_DIR, 'tokens'), CFG_PCSD_TOKENS)
+  end
+
+  def test_parse_empty()
+    text = ''
+    cfg = PCSTokens.new(text)
+    assert_equal(0, cfg.tokens.length)
+    assert_equal(
+      [[
+        "error",
+        "Unable to parse tokens file: A JSON text must at least contain two octets!"
+      ]],
+      $logger.log
+    )
+    assert_equal(
+'{
+  "format_version": 2,
+  "data_version": 0,
+  "tokens": {
+  }
+}',
+      cfg.text
+    )
+  end
+
+  def test_parse_format1()
+    text = '{}'
+    cfg = PCSTokens.new(text)
+    assert_equal(0, cfg.tokens.length)
+
+    text = '{"rh7-1": "token-rh7-1", "rh7-2": "token-rh7-2"}'
+    cfg = PCSTokens.new(text)
+    assert_equal(2, cfg.tokens.length)
+    assert_equal('token-rh7-1', cfg.tokens['rh7-1'])
+    assert_equal(
+'{
+  "format_version": 2,
+  "data_version": 0,
+  "tokens": {
+    "rh7-1": "token-rh7-1",
+    "rh7-2": "token-rh7-2"
+  }
+}',
+      cfg.text
+    )
+  end
+
+  def test_parse_format2()
+    text =
+'{
+  "format_version": 2,
+  "tokens": {}
+}'
+    cfg = PCSTokens.new(text)
+    assert_equal(2, cfg.format_version)
+    assert_equal(0, cfg.data_version)
+    assert_equal(0, cfg.tokens.length)
+    assert_equal(
+'{
+  "format_version": 2,
+  "data_version": 0,
+  "tokens": {
+  }
+}',
+      cfg.text
+    )
+
+    text =
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "tokens": {
+    "rh7-1": "token-rh7-1",
+    "rh7-2": "token-rh7-2"
+  }
+}'
+    cfg = PCSTokens.new(text)
+    assert_equal(2, cfg.format_version)
+    assert_equal(9, cfg.data_version)
+    assert_equal(2, cfg.tokens.length)
+    assert_equal('token-rh7-1', cfg.tokens['rh7-1'])
+    assert_equal(text, cfg.text)
+  end
+
+  def test_update()
+    cfg = PCSTokens.new(File.open(CFG_PCSD_TOKENS).read)
+    assert_equal(
+      {
+        'rh7-1' => '2a8b40aa-b539-4713-930a-483468d62ef4',
+        'rh7-2' => '76174e2c-09e8-4435-b318-5c6b8250a22c',
+        'rh7-3' => '55844951-9ae5-4103-bb4a-64f9c1ea0a71',
+      },
+      cfg.tokens
+    )
+
+    cfg.tokens.delete('rh7-2')
+    assert_equal(
+      {
+        'rh7-1' => '2a8b40aa-b539-4713-930a-483468d62ef4',
+        'rh7-3' => '55844951-9ae5-4103-bb4a-64f9c1ea0a71',
+      },
+      cfg.tokens
+    )
+
+    cfg.tokens['rh7-2'] = '76174e2c-09e8-4435-b318-5c6b8250a22c'
+    assert_equal(
+      {
+        'rh7-1' => '2a8b40aa-b539-4713-930a-483468d62ef4',
+        'rh7-3' => '55844951-9ae5-4103-bb4a-64f9c1ea0a71',
+        'rh7-2' => '76174e2c-09e8-4435-b318-5c6b8250a22c',
+      },
+      cfg.tokens
+    )
+    assert_equal(
+'{
+  "format_version": 2,
+  "data_version": 9,
+  "tokens": {
+    "rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
+    "rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
+    "rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71"
+  }
+}',
+      cfg.text
+    )
+  end
+end
diff --git a/pcsd/test/test_corosyncconf.rb b/pcsd/test/test_corosyncconf.rb
new file mode 100644
index 0000000..27476b5
--- /dev/null
+++ b/pcsd/test/test_corosyncconf.rb
@@ -0,0 +1,1208 @@
+require 'test/unit'
+
+require 'pcsd_test_utils.rb'
+require 'corosyncconf.rb'
+
+class TestCorosyncConfSection < Test::Unit::TestCase
+  def test_empty_section
+    section = CorosyncConf::Section.new('mySection')
+    assert_nil(section.parent)
+    assert_equal(section, section.root)
+    assert_equal('mySection', section.name)
+    assert_equal([], section.attributes)
+    assert_equal([], section.sections)
+    assert_equal('', section.text)
+  end
+
+  def test_attribute_add
+    section = CorosyncConf::Section.new('mySection')
+
+    section.add_attribute('name1', 'value1')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+      ],
+      section.attributes
+    )
+
+    section.add_attribute('name2', 'value2')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name2', 'value2'],
+      ],
+      section.attributes
+    )
+
+    section.add_attribute('name2', 'value2')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name2', 'value2'],
+        ['name2', 'value2'],
+      ],
+      section.attributes
+    )
+  end
+
+  def test_attribute_get
+    section = CorosyncConf::Section.new('mySection')
+    section.add_attribute('name1', 'value1')
+    section.add_attribute('name2', 'value2')
+    section.add_attribute('name3', 'value3')
+    section.add_attribute('name2', 'value2a')
+
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name2', 'value2'],
+        ['name3', 'value3'],
+        ['name2', 'value2a'],
+      ],
+      section.attributes
+    )
+    assert_equal(
+      [
+        ['name1', 'value1'],
+      ],
+      section.attributes('name1')
+    )
+    assert_equal(
+      [
+        ['name2', 'value2'],
+        ['name2', 'value2a'],
+      ],
+      section.attributes('name2')
+    )
+    assert_equal(
+      [],
+      section.attributes('nameX')
+    )
+  end
+
+  def test_attribute_set
+    section = CorosyncConf::Section.new('mySection')
+
+    section.set_attribute('name1', 'value1')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+      ],
+      section.attributes
+    )
+
+    section.set_attribute('name1', 'value1')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+      ],
+      section.attributes
+    )
+
+    section.set_attribute('name1', 'value1a')
+    assert_equal(
+      [
+        ['name1', 'value1a'],
+      ],
+      section.attributes
+    )
+
+    section.set_attribute('name2', 'value2')
+    assert_equal(
+      [
+        ['name1', 'value1a'],
+        ['name2', 'value2'],
+      ],
+      section.attributes
+    )
+
+    section.set_attribute('name1', 'value1')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name2', 'value2'],
+      ],
+      section.attributes
+    )
+
+    section.add_attribute('name3', 'value3')
+    section.add_attribute('name2', 'value2')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name2', 'value2'],
+        ['name3', 'value3'],
+        ['name2', 'value2'],
+      ],
+      section.attributes
+    )
+    section.set_attribute('name2', 'value2a')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name2', 'value2a'],
+        ['name3', 'value3'],
+      ],
+      section.attributes
+    )
+
+    section.add_attribute('name1', 'value1')
+    section.add_attribute('name1', 'value1')
+    section.set_attribute('name1', 'value1')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name2', 'value2a'],
+        ['name3', 'value3'],
+      ],
+      section.attributes
+    )
+  end
+
+  def test_attribute_change
+    section = CorosyncConf::Section.new('mySection')
+    section.add_attribute('name1', 'value1')
+    section.add_attribute('name2', 'value2')
+    section.add_attribute('name3', 'value3')
+    section.add_attribute('name2', 'value2')
+
+    attrib = section.attributes[1]
+    attrib[0] = 'name2a'
+    attrib[1] = 'value2a'
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name2a', 'value2a'],
+        ['name3', 'value3'],
+        ['name2', 'value2'],
+      ],
+      section.attributes
+    )
+  end
+
+  def test_attribute_del
+    section = CorosyncConf::Section.new('mySection')
+    section.add_attribute('name1', 'value1')
+    section.add_attribute('name2', 'value2')
+    section.add_attribute('name3', 'value3')
+    section.add_attribute('name2', 'value2')
+
+    section.del_attribute(section.attributes[1])
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name3', 'value3'],
+      ],
+      section.attributes
+    )
+
+    section.del_attribute(['name3', 'value3'])
+    assert_equal(
+      [
+        ['name1', 'value1'],
+      ],
+      section.attributes
+    )
+
+    section.del_attribute(['name3', 'value3'])
+    assert_equal(
+      [
+        ['name1', 'value1'],
+      ],
+      section.attributes
+    )
+  end
+
+  def test_attribute_del_by_name
+    section = CorosyncConf::Section.new('mySection')
+    section.add_attribute('name1', 'value1')
+    section.add_attribute('name2', 'value2')
+    section.add_attribute('name3', 'value3')
+    section.add_attribute('name2', 'value2')
+
+    section.del_attributes_by_name('nameX')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name2', 'value2'],
+        ['name3', 'value3'],
+        ['name2', 'value2'],
+      ],
+      section.attributes
+    )
+
+    section.del_attributes_by_name('name2', 'value2')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name3', 'value3'],
+      ],
+      section.attributes
+    )
+
+    section.add_attribute('name2', 'value2')
+    section.add_attribute('name2', 'value2a')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name3', 'value3'],
+        ['name2', 'value2'],
+        ['name2', 'value2a'],
+      ],
+      section.attributes
+    )
+    section.del_attributes_by_name('name2', 'value2')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name3', 'value3'],
+        ['name2', 'value2a'],
+      ],
+      section.attributes
+    )
+
+    section.add_attribute('name3', 'value3a')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name3', 'value3'],
+        ['name2', 'value2a'],
+        ['name3', 'value3a'],
+      ],
+      section.attributes
+    )
+    section.del_attributes_by_name('name3')
+    assert_equal(
+      [
+        ['name1', 'value1'],
+        ['name2', 'value2a'],
+      ],
+      section.attributes
+    )
+  end
+
+  def test_section_add
+    root = CorosyncConf::Section.new('root')
+    child1 = CorosyncConf::Section.new('child1')
+    child1a = CorosyncConf::Section.new('child1a')
+    child2 = CorosyncConf::Section.new('child2')
+
+    root.add_section(child1)
+    child1.add_section(child1a)
+    root.add_section(child2)
+    assert_nil(root.parent)
+    assert_equal('root', child1.parent.name)
+    assert_equal('child1', child1a.parent.name)
+    assert_equal('root', child2.parent.name)
+    assert_equal("\
+child1 {
+    child1a {
+    }
+}
+
+child2 {
+}
+",
+      root.text
+    )
+
+    child2.add_section(child1a)
+    assert_equal('child2', child1a.parent.name)
+    assert_equal("\
+child1 {
+}
+
+child2 {
+    child1a {
+    }
+}
+",
+      root.text
+    )
+
+    assert_raise CorosyncConf::CircularParentshipException do
+      child1a.add_section(child1a)
+    end
+    assert_raise CorosyncConf::CircularParentshipException do
+      child1a.add_section(child2)
+    end
+    assert_raise CorosyncConf::CircularParentshipException do
+      child1a.add_section(root)
+    end
+  end
+
+  def test_section_get
+    root = CorosyncConf::Section.new('root')
+    child1 = CorosyncConf::Section.new('child1')
+    child2 = CorosyncConf::Section.new('child2')
+    childa1 = CorosyncConf::Section.new('childA')
+    childa2 = CorosyncConf::Section.new('childA')
+    childa3 = CorosyncConf::Section.new('childA')
+    childa4 = CorosyncConf::Section.new('childA')
+    childb1 = CorosyncConf::Section.new('childB')
+    childb2 = CorosyncConf::Section.new('childB')
+    childa1.add_attribute('id', '1')
+    childa2.add_attribute('id', '2')
+    childa3.add_attribute('id', '3')
+    childa4.add_attribute('id', '4')
+    childb1.add_attribute('id', '5')
+    childb2.add_attribute('id', '6')
+    root.add_section(child1)
+    root.add_section(child2)
+    child1.add_section(childa1)
+    child1.add_section(childa2)
+    child1.add_section(childb1)
+    child2.add_section(childa3)
+    child2.add_section(childb2)
+    child2.add_section(childa4)
+    assert_equal("\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+
+child2 {
+    childA {
+        id: 3
+    }
+
+    childB {
+        id: 6
+    }
+
+    childA {
+        id: 4
+    }
+}
+",
+      root.text
+    )
+
+    assert_equal("\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+---
+child2 {
+    childA {
+        id: 3
+    }
+
+    childB {
+        id: 6
+    }
+
+    childA {
+        id: 4
+    }
+}
+",
+      root.sections.collect { |section| section.text }.join("---\n")
+    )
+
+    assert_equal("\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+",
+      root.sections('child1').collect { |section| section.text }.join("---\n")
+    )
+
+    assert_equal("\
+childA {
+    id: 1
+}
+---
+childA {
+    id: 2
+}
+",
+      child1.sections('childA').collect { |section| section.text }.join("---\n")
+    )
+
+    assert_equal(
+      '',
+      child1.sections('child2').collect { |section| section.text }.join("---\n")
+    )
+  end
+
+  def test_section_del
+    root = CorosyncConf::Section.new('')
+    child1 = CorosyncConf::Section.new('child1')
+    child2 = CorosyncConf::Section.new('child2')
+    childa1 = CorosyncConf::Section.new('childA')
+    childa2 = CorosyncConf::Section.new('childA')
+    childa3 = CorosyncConf::Section.new('childA')
+    childa4 = CorosyncConf::Section.new('childA')
+    childb1 = CorosyncConf::Section.new('childB')
+    childb2 = CorosyncConf::Section.new('childB')
+    childa1.add_attribute('id', '1')
+    childa2.add_attribute('id', '2')
+    childa3.add_attribute('id', '3')
+    childa4.add_attribute('id', '4')
+    childb1.add_attribute('id', '5')
+    childb2.add_attribute('id', '6')
+    root.add_section(child1)
+    root.add_section(child2)
+    child1.add_section(childa1)
+    child1.add_section(childa2)
+    child1.add_section(childb1)
+    child2.add_section(childa3)
+    child2.add_section(childb2)
+    child2.add_section(childa4)
+    assert_equal("\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+
+child2 {
+    childA {
+        id: 3
+    }
+
+    childB {
+        id: 6
+    }
+
+    childA {
+        id: 4
+    }
+}
+",
+      root.text
+    )
+
+    child2.del_section(childb2)
+    assert_nil(childb2.parent)
+    assert_equal("\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+
+child2 {
+    childA {
+        id: 3
+    }
+
+    childA {
+        id: 4
+    }
+}
+",
+      root.text
+    )
+
+    root.del_section(child2)
+    assert_nil(child2.parent)
+    assert_equal("\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+
+    childB {
+        id: 5
+    }
+}
+",
+      root.text
+    )
+
+    root.del_section(child2)
+
+    assert_equal('child1', childa1.parent.name)
+    child2.del_section(childa1)
+    assert_equal('child1', childa1.parent.name)
+
+    child1.del_section(childb1)
+    assert_nil(childb1.parent)
+    assert_equal("\
+child1 {
+    childA {
+        id: 1
+    }
+
+    childA {
+        id: 2
+    }
+}
+",
+      root.text
+    )
+
+    child1.del_section(childa1)
+    assert_nil(childa1.parent)
+    child1.del_section(childa2)
+    assert_nil(childa2.parent)
+    assert_equal("\
+child1 {
+}
+",
+      root.text
+    )
+
+    root.del_section(child1)
+    assert_nil(child1.parent)
+    assert_equal('', root.text)
+  end
+
+  def test_get_root
+    root = CorosyncConf::Section.new('root')
+    child1 = CorosyncConf::Section.new('child1')
+    child1a = CorosyncConf::Section.new('child1a')
+    root.add_section(child1)
+    child1.add_section(child1a)
+
+    assert_equal('root', root.root.name)
+    assert_equal('root', child1.root.name)
+    assert_equal('root', child1a.root.name)
+  end
+
+  def test_text
+    root = CorosyncConf::Section.new('root')
+    assert_equal('', root.text)
+
+    root.add_attribute("name1", "value1")
+    assert_equal("name1: value1\n", root.text)
+
+    root.add_attribute("name2", "value2")
+    root.add_attribute("name2", "value2a")
+    root.add_attribute("name3", "value3")
+    assert_equal("\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+",
+      root.text
+    )
+
+    child1 = CorosyncConf::Section.new('child1')
+    root.add_section(child1)
+    assert_equal("\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+
+child1 {
+}
+",
+      root.text
+    )
+
+    child1.add_attribute("name1.1", "value1.1")
+    child1.add_attribute("name1.2", "value1.2")
+    assert_equal("\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+
+child1 {
+    name1.1: value1.1
+    name1.2: value1.2
+}
+",
+      root.text
+    )
+
+    child2 = CorosyncConf::Section.new('child2')
+    child2.add_attribute("name2.1", "value2.1")
+    root.add_section(child2)
+    assert_equal("\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+
+child1 {
+    name1.1: value1.1
+    name1.2: value1.2
+}
+
+child2 {
+    name2.1: value2.1
+}
+",
+      root.text
+    )
+
+    child2a = CorosyncConf::Section.new('child2a')
+    child2a.add_attribute("name2.a.1", "value2.a.1")
+    child2.add_section(child2a)
+    assert_equal("\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+
+child1 {
+    name1.1: value1.1
+    name1.2: value1.2
+}
+
+child2 {
+    name2.1: value2.1
+
+    child2a {
+        name2.a.1: value2.a.1
+    }
+}
+",
+      root.text
+    )
+
+    child3 = CorosyncConf::Section.new('child3')
+    root.add_section(child3)
+    child3.add_section(CorosyncConf::Section.new('child3a'))
+    child3.add_section(CorosyncConf::Section.new('child3b'))
+    assert_equal("\
+name1: value1
+name2: value2
+name2: value2a
+name3: value3
+
+child1 {
+    name1.1: value1.1
+    name1.2: value1.2
+}
+
+child2 {
+    name2.1: value2.1
+
+    child2a {
+        name2.a.1: value2.a.1
+    }
+}
+
+child3 {
+    child3a {
+    }
+
+    child3b {
+    }
+}
+",
+      root.text
+    )
+  end
+end
+
+class TestCorosyncConfParser < Test::Unit::TestCase
+  def test_empty
+    assert_equal('', CorosyncConf::parse_string('').text)
+  end
+
+  def test_attributes
+    string = "\
+name:value\
+"
+    parsed = "\
+name: value
+"
+    assert_equal(parsed, CorosyncConf::parse_string(string).text)
+
+    string = "\
+name:value
+name:value
+"
+    parsed = "\
+name: value
+name: value
+"
+    assert_equal(parsed, CorosyncConf::parse_string(string).text)
+
+    string = "\
+  name1:value1  
+name2  :value2
+name3:  value3
+  name4  :  value4  
+"
+    parsed = "\
+name1: value1
+name2: value2
+name3: value3
+name4: value4
+"
+    assert_equal(parsed, CorosyncConf::parse_string(string).text)
+
+    string = "\
+name:foo:value
+"
+    parsed = "\
+name: foo:value
+"
+    root = CorosyncConf::parse_string(string)
+    assert_equal(
+      [['name', 'foo:value']],
+      root.attributes
+    )
+    assert_equal(parsed, root.text)
+
+    string = "\
+name :  
+"
+    parsed = "\
+name: 
+"
+    root = CorosyncConf::parse_string(string)
+    assert_equal(
+      [['name', '']],
+      root.attributes
+    )
+    assert_equal(parsed, root.text)
+  end
+
+  def test_section
+    string = "\
+section1 {
+}\
+"
+    parsed = "\
+section1 {
+}
+"
+    assert_equal(parsed, CorosyncConf::parse_string(string).text)
+
+    string = "\
+section1 {
+    section1a   {
+  }
+  section1b        {       
+     }    
+}
+"
+    parsed = "\
+section1 {
+    section1a {
+    }
+
+    section1b {
+    }
+}
+"
+    assert_equal(parsed, CorosyncConf::parse_string(string).text)
+
+    string = "\
+section1 {
+    section1a junk1 { junk2
+    junk3 } junk4
+    section1b junk5{junk6
+    junk7}junk8
+}
+section2 {
+   section2a {
+   }
+   section2b {
+   }
+}
+"
+    parsed = "\
+section1 {
+    section1a junk1 {
+    }
+
+    section1b junk5 {
+    }
+}
+
+section2 {
+    section2a {
+    }
+
+    section2b {
+    }
+}
+"
+    assert_equal(parsed, CorosyncConf::parse_string(string).text)
+
+    string = "\
+section1 {
+    section1a {
+    }
+
+    section1b {
+    }
+}
+}
+"
+    assert_raise CorosyncConf::ParseErrorException do
+      CorosyncConf::parse_string(string)
+    end
+
+    string = "\
+section1 {
+    section1a {
+
+    section1b {
+    }
+}
+"
+    assert_raise CorosyncConf::ParseErrorException do
+      CorosyncConf::parse_string(string)
+    end
+
+    string = "\
+section1 {
+"
+    assert_raise CorosyncConf::ParseErrorException do
+      CorosyncConf::parse_string(string)
+    end
+
+    string = "\
+}
+"
+    assert_raise CorosyncConf::ParseErrorException do
+      CorosyncConf::parse_string(string)
+    end
+  end
+
+  def test_comment
+    string= "\
+# junk1
+name1: value1
+  #junk2
+name2: value2#junk3
+name3: value3 #junk4
+name4 # junk5: value4
+#junk6 name5: value5
+#junk7
+"
+    parsed = "\
+name1: value1
+name2: value2#junk3
+name3: value3 #junk4
+name4 # junk5: value4
+"
+    assert_equal(parsed, CorosyncConf::parse_string(string).text)
+
+    string= "\
+# junk1
+section1 { # junk2
+}
+section2 # junk2 {
+}
+section3 {
+} #junk3
+"
+    parsed = "\
+section1 {
+}
+
+section2 # junk2 {
+}
+
+section3 {
+}
+"
+    assert_equal(parsed, CorosyncConf::parse_string(string).text)
+
+    string = "\
+section {
+#}
+"
+    assert_raise CorosyncConf::ParseErrorException do
+      CorosyncConf::parse_string(string)
+    end
+
+    string = "\
+#section {
+}
+"""
+    assert_raise CorosyncConf::ParseErrorException do
+      CorosyncConf::parse_string(string)
+    end
+  end
+
+  def test_full
+    string = "\
+# Please read the corosync.conf.5 manual page
+totem {
+	version: 2
+
+	# crypto_cipher and crypto_hash: Used for mutual node authentication.
+	# If you choose to enable this, then do remember to create a shared
+	# secret with 'corosync-keygen'.
+	# enabling crypto_cipher, requires also enabling of crypto_hash.
+	crypto_cipher: none
+	crypto_hash: none
+
+	# interface: define at least one interface to communicate
+	# over. If you define more than one interface stanza, you must
+	# also set rrp_mode.
+	interface {
+                # Rings must be consecutively numbered, starting at 0.
+		ringnumber: 0
+		# This is normally the *network* address of the
+		# interface to bind to. This ensures that you can use
+		# identical instances of this configuration file
+		# across all your cluster nodes, without having to
+		# modify this option.
+		bindnetaddr: 192.168.1.0
+		# However, if you have multiple physical network
+		# interfaces configured for the same subnet, then the
+		# network address alone is not sufficient to identify
+		# the interface Corosync should bind to. In that case,
+		# configure the *host* address of the interface
+		# instead:
+		# bindnetaddr: 192.168.1.1
+		# When selecting a multicast address, consider RFC
+		# 2365 (which, among other things, specifies that
+		# 239.255.x.x addresses are left to the discretion of
+		# the network administrator). Do not reuse multicast
+		# addresses across multiple Corosync clusters sharing
+		# the same network.
+		mcastaddr: 239.255.1.1
+		# Corosync uses the port you specify here for UDP
+		# messaging, and also the immediately preceding
+		# port. Thus if you set this to 5405, Corosync sends
+		# messages over UDP ports 5405 and 5404.
+		mcastport: 5405
+		# Time-to-live for cluster communication packets. The
+		# number of hops (routers) that this ring will allow
+		# itself to pass. Note that multicast routing must be
+		# specifically enabled on most network routers.
+		ttl: 1
+	}
+}
+
+logging {
+	# Log the source file and line where messages are being
+	# generated. When in doubt, leave off. Potentially useful for
+	# debugging.
+	fileline: off
+	# Log to standard error. When in doubt, set to no. Useful when
+	# running in the foreground (when invoking 'corosync -f')
+	to_stderr: no
+	# Log to a log file. When set to 'no', the 'logfile' option
+	# must not be set.
+	to_logfile: yes
+	logfile: /var/log/cluster/corosync.log
+	# Log to the system log daemon. When in doubt, set to yes.
+	to_syslog: yes
+	# Log debug messages (very verbose). When in doubt, leave off.
+	debug: off
+	# Log messages with time stamps. When in doubt, set to on
+	# (unless you are only logging to syslog, where double
+	# timestamps can be annoying).
+	timestamp: on
+	logger_subsys {
+		subsys: QUORUM
+		debug: off
+	}
+}
+
+quorum {
+	# Enable and configure quorum subsystem (default: off)
+	# see also corosync.conf.5 and votequorum.5
+	#provider: corosync_votequorum
+}
+"
+    parsed = "\
+totem {
+    version: 2
+    crypto_cipher: none
+    crypto_hash: none
+
+    interface {
+        ringnumber: 0
+        bindnetaddr: 192.168.1.0
+        mcastaddr: 239.255.1.1
+        mcastport: 5405
+        ttl: 1
+    }
+}
+
+logging {
+    fileline: off
+    to_stderr: no
+    to_logfile: yes
+    logfile: /var/log/cluster/corosync.log
+    to_syslog: yes
+    debug: off
+    timestamp: on
+
+    logger_subsys {
+        subsys: QUORUM
+        debug: off
+    }
+}
+
+quorum {
+}
+"
+    assert_equal(parsed, CorosyncConf::parse_string(string).text)
+
+    string = "\
+# Please read the corosync.conf.5 manual page
+totem {
+	version: 2
+
+	crypto_cipher: none
+	crypto_hash: none
+
+	interface {
+		ringnumber: 0
+		bindnetaddr: 10.16.35.0
+		mcastport: 5405
+		ttl: 1
+	}
+	transport: udpu
+}
+
+logging {
+	fileline: off
+	to_logfile: yes
+	to_syslog: yes
+	logfile: /var/log/cluster/corosync.log
+	debug: off
+	timestamp: on
+	logger_subsys {
+		subsys: QUORUM
+		debug: off
+	}
+}
+
+nodelist {
+	node {
+		ring0_addr: 10.16.35.101
+		nodeid: 1
+	}
+
+	node {
+		ring0_addr: 10.16.35.102
+		nodeid: 2
+	}
+
+	node {
+		ring0_addr: 10.16.35.103
+	}
+
+	node {
+		ring0_addr: 10.16.35.104
+	}
+
+	node {
+		ring0_addr: 10.16.35.105
+	}
+}
+
+quorum {
+	# Enable and configure quorum subsystem (default: off)
+	# see also corosync.conf.5 and votequorum.5
+	#provider: corosync_votequorum
+}
+"
+    parsed = "\
+totem {
+    version: 2
+    crypto_cipher: none
+    crypto_hash: none
+    transport: udpu
+
+    interface {
+        ringnumber: 0
+        bindnetaddr: 10.16.35.0
+        mcastport: 5405
+        ttl: 1
+    }
+}
+
+logging {
+    fileline: off
+    to_logfile: yes
+    to_syslog: yes
+    logfile: /var/log/cluster/corosync.log
+    debug: off
+    timestamp: on
+
+    logger_subsys {
+        subsys: QUORUM
+        debug: off
+    }
+}
+
+nodelist {
+    node {
+        ring0_addr: 10.16.35.101
+        nodeid: 1
+    }
+
+    node {
+        ring0_addr: 10.16.35.102
+        nodeid: 2
+    }
+
+    node {
+        ring0_addr: 10.16.35.103
+    }
+
+    node {
+        ring0_addr: 10.16.35.104
+    }
+
+    node {
+        ring0_addr: 10.16.35.105
+    }
+}
+
+quorum {
+}
+"
+    assert_equal(parsed, CorosyncConf::parse_string(string).text)
+  end
+end
diff --git a/pcsd/test/test_pcs.rb b/pcsd/test/test_pcs.rb
new file mode 100644
index 0000000..eb58f2f
--- /dev/null
+++ b/pcsd/test/test_pcs.rb
@@ -0,0 +1,257 @@
+require 'test/unit'
+require 'fileutils'
+require 'json'
+require 'rexml/document'
+
+require 'pcs.rb'
+
+class TestGetNodesAttributes < Test::Unit::TestCase
+  def test_empty
+    cib = '
+<cib>
+  <configuration>
+    <nodes>
+      <node uname="node1">
+        <instance_attributes/>
+      </node>
+      <node uname="node2"/>
+      <node uname="node3">
+        <instance_attributes/>
+      </node>
+    </nodes>
+  </configuration>
+</cib>'
+    cib_dom = REXML::Document.new(cib)
+    assert_equal({}, get_node_attributes(nil, cib_dom))
+  end
+
+  def test_bad_path
+    cib = '
+<cib>
+  <configuration>
+    <nodes>
+      <node uname="node1">
+        <instance_attributes/>
+        <nvpair name="test1" value="test2"/>
+      </node>
+      <node uname="node2"/>
+      <node uname="node3">
+        <instance_attributes/>
+      </node>
+    </nodes>
+  </configuration>
+  <nvpair name="test" value="testval"/>
+</cib>
+'
+    cib_dom = REXML::Document.new(cib)
+    assert_equal({}, get_node_attributes(nil, cib_dom))
+  end
+
+  def test_attributes
+    cib = '
+<cib>
+  <configuration>
+    <nodes>
+      <node id="1" uname="node1">
+        <instance_attributes id="nodes-1"/>
+      </node>
+      <node id="2" uname="node2">
+        <instance_attributes id="nodes-2">
+          <nvpair id="nodes-2-test" name="test" value="44"/>
+        </instance_attributes>
+      </node>
+      <node id="3" uname="node3">
+        <instance_attributes id="nodes-3">
+          <nvpair id="nodes-3-test" name="test" value="testval2"/>
+          <nvpair id="nodes-3-test2" name="test2" value="1"/>
+          <nvpair id="nodes-3-test321" name="test321" value="321"/>
+        </instance_attributes>
+      </node>
+    </nodes>
+  </configuration>
+</cib>
+'
+    cib_dom = REXML::Document.new(cib)
+    expected = {}
+    expected['node2'] = JSON.parse(
+      '[
+        {
+          "id": "nodes-2-test",
+          "key": "test",
+          "value": "44"
+        }
+      ]', {:symbolize_names => true})
+    expected['node3'] = JSON.parse(
+      '[
+        {
+          "id": "nodes-3-test",
+          "key": "test",
+          "value": "testval2"
+        },
+        {
+          "id": "nodes-3-test2",
+          "key": "test2",
+          "value": "1"
+        },
+        {
+          "id": "nodes-3-test321",
+          "key": "test321",
+          "value": "321"
+        }
+      ]', {:symbolize_names => true})
+    assert_equal(expected, get_node_attributes(nil, cib_dom))
+  end
+end
+
+class TestGetFenceLevels < Test::Unit::TestCase
+  def test_empty
+    cib = '
+<cib>
+  <configuration>
+    <fencing-topology/>
+  </configuration>
+</cib>'
+    cib_dom = REXML::Document.new(cib)
+    assert_equal({}, get_fence_levels(nil, cib_dom))
+  end
+
+  def test_bad_path
+    cib = '
+<cib>
+  <configuration>
+    <fencing-topology/>
+    <fencing-level devices="node2-stonith" id="fl-node3-33" index="33" target="node3"/>
+  </configuration>
+  <fencing-level devices="node1-stonith" id="fl-node1-1" index="1" target="node1"/>
+</cib>'
+    cib_dom = REXML::Document.new(cib)
+    assert_equal({}, get_fence_levels(nil, cib_dom))
+  end
+
+  def test_levels
+    cib = '
+<cib>
+  <configuration>
+    <fencing-topology>
+      <fencing-level devices="node1-stonith" id="fl-node1-1" index="1" target="node1"/>
+      <fencing-level devices="node2-stonith" id="fl-node1-2" index="2" target="node1"/>
+      <fencing-level devices="node1-stonith" id="fl-node3-121" index="121" target="node3"/>
+      <fencing-level devices="node3-stonith" id="fl-node3-312" index="312" target="node3"/>
+      <fencing-level devices="node2-stonith" id="fl-node3-33" index="33" target="node3"/>
+    </fencing-topology>
+  </configuration>
+</cib>'
+    cib_dom = REXML::Document.new(cib)
+    expected_json = '
+{
+  "node1": [
+    {
+      "level": "1",
+      "devices": "node1-stonith"
+    },
+    {
+      "level": "2",
+      "devices": "node2-stonith"
+    }
+  ],
+  "node3": [
+    {
+      "level": "33",
+      "devices": "node2-stonith"
+    },
+    {
+      "level": "121",
+      "devices": "node1-stonith"
+    },
+    {
+      "level": "312",
+      "devices": "node3-stonith"
+    }
+  ]
+}
+'
+    assert_equal(JSON.parse(expected_json), get_fence_levels(nil, cib_dom))
+  end
+end
+
+class TestGetAcls < Test::Unit::TestCase
+  def test_empty
+    cib = '
+<cib>
+  <configuration>
+    <acls/>
+  </configuration>
+</cib>'
+    cib_dom = REXML::Document.new(cib)
+    expected = {"group"=>{}, "role"=>{}, "target"=>{}, "user"=>{}}
+    assert_equal(expected, get_acls(nil, cib_dom))
+  end
+
+  def test_bad_path
+    cib = '
+<cib>
+  <configuration>
+    <acls/>
+    <acl_role id="test">
+      <acl_permission id="test1" kind="read" reference="test-ref"/>
+    </acl_role>
+    <acl_target id="target_id">
+      <role id="test"/>
+    </acl_target>
+  </configuration>
+</cib>'
+    cib_dom = REXML::Document.new(cib)
+    expected = {"group"=>{}, "role"=>{}, "target"=>{}, "user"=>{}}
+    assert_equal(expected, get_acls(nil, cib_dom))
+  end
+
+  def test_acls
+    cib = '
+<cib>
+  <configuration>
+    <acls>
+      <acl_role description="testing" id="test">
+        <acl_permission id="test-read" kind="read" xpath="/*"/>
+        <acl_permission id="test-write" kind="write" reference="test-read"/>
+      </acl_role>
+      <acl_target id="test2">
+        <role id="test"/>
+      </acl_target>
+      <acl_group id="testgroup">
+        <role id="test"/>
+      </acl_group>
+    </acls>
+  </configuration>
+</cib>'
+    cib_dom = REXML::Document.new(cib)
+    expected_json = '
+{
+  "role": {
+    "test": {
+      "description": "testing",
+      "permissions": [
+        "read xpath /* (test-read)",
+        "write id test-read (test-write)"
+      ]
+    }
+  },
+  "group": {
+    "testgroup": [
+      "test"
+    ]
+  },
+  "user": {
+    "test2": [
+      "test"
+    ]
+  },
+  "target": {
+    "test2": [
+      "test"
+    ]
+  }
+}
+    '
+    assert_equal(JSON.parse(expected_json), get_acls(nil, cib_dom))
+  end
+end
diff --git a/pcsd/test/test_permissions.rb b/pcsd/test/test_permissions.rb
new file mode 100644
index 0000000..6628592
--- /dev/null
+++ b/pcsd/test/test_permissions.rb
@@ -0,0 +1,498 @@
+require 'test/unit'
+
+require 'pcsd_test_utils.rb'
+require 'permissions.rb'
+
+class TestPermissions < Test::Unit::TestCase
+
+  def test_is_user_type()
+    assert_equal(true, Permissions::is_user_type(Permissions::TYPE_USER))
+    assert_equal(true, Permissions::is_user_type(Permissions::TYPE_GROUP))
+    assert_equal(false, Permissions::is_user_type(''))
+    assert_equal(false, Permissions::is_user_type('nonsense'))
+  end
+
+  def test_is_permission_type()
+    assert_equal(true, Permissions::is_permission_type(Permissions::READ))
+    assert_equal(true, Permissions::is_permission_type(Permissions::WRITE))
+    assert_equal(true, Permissions::is_permission_type(Permissions::GRANT))
+    assert_equal(true, Permissions::is_permission_type(Permissions::FULL))
+    assert_equal(false, Permissions::is_permission_type(''))
+    assert_equal(false, Permissions::is_permission_type('nonsense'))
+  end
+end
+
+
+class TestEntityPermissions < Test::Unit::TestCase
+
+  def setup
+    $logger = MockLogger.new
+  end
+
+  def test_applies_to()
+    ep = Permissions::EntityPermissions.new(Permissions::TYPE_USER, 'user', [])
+    assert_equal(true, ep.applies_to(Permissions::TYPE_USER, 'user'))
+    assert_equal(false, ep.applies_to(Permissions::TYPE_USER, 'group'))
+    assert_equal(false, ep.applies_to(Permissions::TYPE_GROUP, 'user'))
+    assert_equal(false, ep.applies_to(Permissions::TYPE_GROUP, 'group'))
+
+    ep = Permissions::EntityPermissions.new(Permissions::TYPE_GROUP, 'group', [])
+    assert_equal(false, ep.applies_to(Permissions::TYPE_USER, 'user'))
+    assert_equal(false, ep.applies_to(Permissions::TYPE_USER, 'group'))
+    assert_equal(false, ep.applies_to(Permissions::TYPE_GROUP, 'user'))
+    assert_equal(true, ep.applies_to(Permissions::TYPE_GROUP, 'group'))
+  end
+
+  def test_allows()
+    ep = Permissions::EntityPermissions.new(Permissions::TYPE_USER, 'user', [])
+    assert_equal(false, ep.allows?(Permissions::FULL))
+    assert_equal(false, ep.allows?(Permissions::GRANT))
+    assert_equal(false, ep.allows?(Permissions::WRITE))
+    assert_equal(false, ep.allows?(Permissions::READ))
+
+    ep = Permissions::EntityPermissions.new(Permissions::TYPE_USER, 'user', [
+      Permissions::READ
+    ])
+    assert_equal(false, ep.allows?(Permissions::FULL))
+    assert_equal(false, ep.allows?(Permissions::GRANT))
+    assert_equal(false, ep.allows?(Permissions::WRITE))
+    assert_equal(true, ep.allows?(Permissions::READ))
+
+    ep = Permissions::EntityPermissions.new(Permissions::TYPE_USER, 'user', [
+      Permissions::WRITE
+    ])
+    assert_equal(false, ep.allows?(Permissions::FULL))
+    assert_equal(false, ep.allows?(Permissions::GRANT))
+    assert_equal(true, ep.allows?(Permissions::WRITE))
+    assert_equal(true, ep.allows?(Permissions::READ))
+
+    ep = Permissions::EntityPermissions.new(Permissions::TYPE_USER, 'user', [
+      Permissions::GRANT
+    ])
+    assert_equal(false, ep.allows?(Permissions::FULL))
+    assert_equal(true, ep.allows?(Permissions::GRANT))
+    assert_equal(false, ep.allows?(Permissions::WRITE))
+    assert_equal(false, ep.allows?(Permissions::READ))
+
+    ep = Permissions::EntityPermissions.new(Permissions::TYPE_USER, 'user', [
+      Permissions::FULL
+    ])
+    assert_equal(true, ep.allows?(Permissions::FULL))
+    assert_equal(true, ep.allows?(Permissions::GRANT))
+    assert_equal(true, ep.allows?(Permissions::WRITE))
+    assert_equal(true, ep.allows?(Permissions::READ))
+
+    ep = Permissions::EntityPermissions.new(Permissions::TYPE_USER, 'user', [
+      Permissions::READ, Permissions::WRITE
+    ])
+    assert_equal(false, ep.allows?(Permissions::FULL))
+    assert_equal(false, ep.allows?(Permissions::GRANT))
+    assert_equal(true, ep.allows?(Permissions::WRITE))
+    assert_equal(true, ep.allows?(Permissions::READ))
+
+    ep = Permissions::EntityPermissions.new(Permissions::TYPE_USER, 'user', [
+      Permissions::READ, Permissions::WRITE, Permissions::GRANT
+    ])
+    assert_equal(false, ep.allows?(Permissions::FULL))
+    assert_equal(true, ep.allows?(Permissions::GRANT))
+    assert_equal(true, ep.allows?(Permissions::WRITE))
+    assert_equal(true, ep.allows?(Permissions::READ))
+
+    ep = Permissions::EntityPermissions.new(Permissions::TYPE_USER, 'user', [
+      Permissions::READ, Permissions::WRITE, Permissions::GRANT, Permissions::FULL
+    ])
+    assert_equal(true, ep.allows?(Permissions::FULL))
+    assert_equal(true, ep.allows?(Permissions::GRANT))
+    assert_equal(true, ep.allows?(Permissions::WRITE))
+    assert_equal(true, ep.allows?(Permissions::READ))
+  end
+
+  def test_merge!()
+    ep = Permissions::EntityPermissions.new(Permissions::TYPE_USER, 'user', [
+      Permissions::READ
+    ])
+    assert_equal(false, ep.allows?(Permissions::FULL))
+    assert_equal(false, ep.allows?(Permissions::GRANT))
+    assert_equal(false, ep.allows?(Permissions::WRITE))
+    assert_equal(true, ep.allows?(Permissions::READ))
+
+    ep.merge!(Permissions::EntityPermissions.new(Permissions::TYPE_USER, 'user', [
+      Permissions::GRANT
+    ]))
+    assert_equal(false, ep.allows?(Permissions::FULL))
+    assert_equal(true, ep.allows?(Permissions::GRANT))
+    assert_equal(false, ep.allows?(Permissions::WRITE))
+    assert_equal(true, ep.allows?(Permissions::READ))
+  end
+
+end
+
+
+class TestPermissionsSet < Test::Unit::TestCase
+
+  def setup
+    $logger = MockLogger.new
+  end
+
+  def test_allows_empty
+    perms = Permissions::PermissionsSet.new([])
+
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::READ))
+  end
+
+  def test_allows_user
+    perms = Permissions::PermissionsSet.new([
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_USER, 'user1', []
+      ),
+    ])
+
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::READ))
+
+
+    perms = Permissions::PermissionsSet.new([
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_USER, 'user1', [Permissions::WRITE]
+      ),
+    ])
+
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user1', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user1', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user2', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user2', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user2', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user2', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user2', ['group1'], Permissions::FULL))
+    assert_equal(false, perms.allows?('user2', ['group1'], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user2', ['group1'], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user2', ['group1'], Permissions::READ))
+
+
+    perms = Permissions::PermissionsSet.new([
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_USER, 'user1', [Permissions::WRITE]
+      ),
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_USER, 'user2', [Permissions::GRANT]
+      ),
+    ])
+
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user1', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user1', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user2', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('user2', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user2', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user2', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user2', ['group1'], Permissions::FULL))
+    assert_equal(true, perms.allows?('user2', ['group1'], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user2', ['group1'], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user2', ['group1'], Permissions::READ))
+  end
+
+  def test_allows_group
+    perms = Permissions::PermissionsSet.new([
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_GROUP, 'group1', []
+      ),
+    ])
+
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::READ))
+
+
+    perms = Permissions::PermissionsSet.new([
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_GROUP, 'group1', [Permissions::WRITE]
+      ),
+    ])
+
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user2', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user2', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user2', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user2', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user2', ['group1'], Permissions::FULL))
+    assert_equal(false, perms.allows?('user2', ['group1'], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user2', ['group1'], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user2', ['group1'], Permissions::READ))
+
+
+    perms = Permissions::PermissionsSet.new([
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_GROUP, 'group1', [Permissions::WRITE]
+      ),
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_GROUP, 'group2', [Permissions::GRANT]
+      ),
+    ])
+
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', ['group2'], Permissions::FULL))
+    assert_equal(true, perms.allows?('user1', ['group2'], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', ['group2'], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', ['group2'], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', ['group1', 'group2'], Permissions::FULL))
+    assert_equal(true, perms.allows?('user1', ['group1', 'group2'], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user1', ['group1', 'group2'], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user1', ['group1', 'group2'], Permissions::READ))
+  end
+
+  def test_allows_user_group
+    perms = Permissions::PermissionsSet.new([
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_USER, 'user1', []
+      ),
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_GROUP, 'group1', []
+      ),
+    ])
+
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::FULL))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::READ))
+
+    assert_equal(
+      [
+        ['debug', 'permission check action=full username=hacluster groups='],
+        ['debug', 'permission granted for superuser'],
+        ['debug', 'permission check action=grant username=hacluster groups='],
+        ['debug', 'permission granted for superuser'],
+        ['debug', 'permission check action=write username=hacluster groups='],
+        ['debug', 'permission granted for superuser'],
+        ['debug', 'permission check action=read username=hacluster groups='],
+        ['debug', 'permission granted for superuser'],
+        ['debug', 'permission check action=full username=user1 groups='],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=grant username=user1 groups='],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=write username=user1 groups='],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=read username=user1 groups='],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=full username=user1 groups=group1'],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=grant username=user1 groups=group1'],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=write username=user1 groups=group1'],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=read username=user1 groups=group1'],
+        ['debug', 'permission denied'],
+      ],
+      $logger.log
+    )
+    $logger.clean
+
+    perms = Permissions::PermissionsSet.new([
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_USER, 'user1', [Permissions::GRANT]
+      ),
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_GROUP, 'group1', [Permissions::WRITE]
+      ),
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_USER, 'user3', [Permissions::FULL]
+      ),
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_GROUP, 'group3', [Permissions::FULL]
+      ),
+    ])
+
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::GRANT))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('hacluster', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('user1', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user1', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user1', ['group1'], Permissions::FULL))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user1', ['group1'], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user2', [], Permissions::FULL))
+    assert_equal(false, perms.allows?('user2', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user2', [], Permissions::WRITE))
+    assert_equal(false, perms.allows?('user2', [], Permissions::READ))
+
+    assert_equal(false, perms.allows?('user2', ['group1'], Permissions::FULL))
+    assert_equal(false, perms.allows?('user2', ['group1'], Permissions::GRANT))
+    assert_equal(true, perms.allows?('user2', ['group1'], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user2', ['group1'], Permissions::READ))
+
+    assert_equal(
+      [
+        ['debug', 'permission check action=full username=hacluster groups='],
+        ['debug', 'permission granted for superuser'],
+        ['debug', 'permission check action=grant username=hacluster groups='],
+        ['debug', 'permission granted for superuser'],
+        ['debug', 'permission check action=write username=hacluster groups='],
+        ['debug', 'permission granted for superuser'],
+        ['debug', 'permission check action=read username=hacluster groups='],
+        ['debug', 'permission granted for superuser'],
+        ['debug', 'permission check action=full username=user1 groups='],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=grant username=user1 groups='],
+        ['debug', 'permission granted for user user1'],
+        ['debug', 'permission check action=write username=user1 groups='],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=read username=user1 groups='],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=full username=user1 groups=group1'],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=grant username=user1 groups=group1'],
+        ['debug', 'permission granted for user user1'],
+        ['debug', 'permission check action=write username=user1 groups=group1'],
+        ['debug', 'permission granted for group group1'],
+        ['debug', 'permission check action=read username=user1 groups=group1'],
+        ['debug', 'permission granted for group group1'],
+        ['debug', 'permission check action=full username=user2 groups='],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=grant username=user2 groups='],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=write username=user2 groups='],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=read username=user2 groups='],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=full username=user2 groups=group1'],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=grant username=user2 groups=group1'],
+        ['debug', 'permission denied'],
+        ['debug', 'permission check action=write username=user2 groups=group1'],
+        ['debug', 'permission granted for group group1'],
+        ['debug', 'permission check action=read username=user2 groups=group1'],
+        ['debug', 'permission granted for group group1'],
+      ],
+      $logger.log
+    )
+  end
+
+  def test_merge!
+    perms = Permissions::PermissionsSet.new([
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_USER, 'user1', [Permissions::GRANT]
+      ),
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_GROUP, 'user2', [Permissions::FULL]
+      ),
+      Permissions::EntityPermissions.new(
+        Permissions::TYPE_USER, 'user1', [Permissions::READ]
+      ),
+    ])
+
+    assert_equal(false, perms.allows?('user1', [], Permissions::FULL))
+    assert_equal(true, perms.allows?('user1', [], Permissions::GRANT))
+    assert_equal(false, perms.allows?('user1', [], Permissions::WRITE))
+    assert_equal(true, perms.allows?('user1', [], Permissions::READ))
+  end
+
+end
diff --git a/pcsd/test/tokens b/pcsd/test/tokens
new file mode 100644
index 0000000..ee449b1
--- /dev/null
+++ b/pcsd/test/tokens
@@ -0,0 +1,9 @@
+{
+  "format_version": 2,
+  "data_version": 9,
+  "tokens": {
+    "rh7-1": "2a8b40aa-b539-4713-930a-483468d62ef4",
+    "rh7-2": "76174e2c-09e8-4435-b318-5c6b8250a22c",
+    "rh7-3": "55844951-9ae5-4103-bb4a-64f9c1ea0a71"
+  }
+}
diff --git a/pcsd/views/_acls.erb b/pcsd/views/_acls.erb
index 6461cdb..98bb356 100644
--- a/pcsd/views/_acls.erb
+++ b/pcsd/views/_acls.erb
@@ -5,7 +5,7 @@
 <tr {{bind-attr style="Pcs.acls_page"}}>
   <td id="remove_add" class="borderbottom">
     <div class="x sprites"></div>
-    <div class="link" onclick="verify_acl_role_remove();return false;"> Remove    </div>
+    <div class="link" onclick="verify_remove_acl_roles();return false;"> Remove    </div>
     <div class="plus sprites"></div>
     <div class="link" onclick="$('#add_acl_role').dialog({
         title: 'Add ACL Role',
@@ -68,20 +68,31 @@
               <tr><th>Type</th><th>XPath/ID</th><th>Query/ID</th><th>Remove</th></tr>
                 {{#each perm in Pcs.aclsController.cur_role.permissions}}
                 <tr {{bind-attr acl_perm_id="perm.permission_id"}}>
-                    <td>{{perm.type}}</td>
-                    <td>{{perm.xpath_id}}</td>
-                    <td>{{perm.query_id}}</td>
-                    <td style="text-align:center;">
-                      <a onclick="x=$(this);remove_acl_item($(this).closest('tr'),'perm');return false;" href="#" class="remove">X</a>
-                    </td>
-                  </tr>
+                  <td>{{perm.type}}</td>
+                  <td>{{perm.xpath_id}}</td>
+                  <td>{{perm.query_id}}</td>
+                  <td style="text-align:center;">
+                    <a onclick="x=$(this);remove_acl_item($(this).closest('tr'),'perm');return false;" href="#" class="remove">X</a>
+                  </td>
+                </tr>
               {{/each}}
               {{#unless Pcs.aclsController}}
                 <tr><td style="color: gray;">NONE</td><td></td><td></td></tr>
               {{/unless}}
               <tr id="new_acl_perm">
-                <td><select name="role_type"><option>read</option><option>write</option><option>deny</option></select></td>
-                <td><select name="role_xpath_id"><option>XPath</option><option>ID</option></td>
+                <td>
+                  <select name="role_type">
+                    <option value="read">Read</option>
+                    <option value="write">Write</option>
+                    <option value="deny">Deny</option>
+                  </select>
+                </td>
+                <td>
+                  <select name="role_xpath_id">
+                    <option value="xpath">XPath</option>
+                    <option value="id">ID</option>
+                  </select>
+                </td>
                 <td><input type="text" name="role_query_id"></td>
                 <td><button type="button" onclick="add_acl_item('#new_acl_perm', 'perm');" name="add">Add</button></td>
               </tr>
@@ -158,8 +169,5 @@
       </tr>
     </table>
   </div>
-  <div id="remove_acl_roles" style="display:none;">
-    <p style="font-size:12px;">Are you sure you want to remove the following ACL role(s)?</p>
-    <span id="roles_to_remove"></span>
-  </div>
+  <%= erb :_dialogs %>
 </td></td>
diff --git a/pcsd/views/_cluster_list.erb b/pcsd/views/_cluster_list.erb
index deff300..90f084e 100644
--- a/pcsd/views/_cluster_list.erb
+++ b/pcsd/views/_cluster_list.erb
@@ -1,28 +1,65 @@
 <form method=post action="/manage/removecluster">
-  <table cellpadding=0 cellspacing=0 style="float:left;">
+  <table cellpadding=0 cellspacing=0 style="width: 100%; padding-left: 5px;">
     <tr>
-      <%if @clusters.length > 0 %>
-	<th></th><th></th><th>NAME</th><th>NODES</th><th></th>
-      <% end %>
+      <th>All</th>
+      <th><div class="check sprites" title="OK"></div></th>
+      <th><div class="warning sprites" title="Warning"></div></th>
+      <th><div class="error sprites" title="Failed"></div></th>
+      <th style="width: 20px;"><div class="x sprites" title="Unknown"></div></th>
     </tr>
-    <% @clusters.each  do |c| %>
-      <tr onmouseover="$(this).css('background-color', 'e4f5fd');$(this).find('td').last().css('display','');show_cluster_info(this)" onmouseout="$(this).css('background-color','ffffff');$(this).find('td').last().css('display','none');" onclick="window.location='<%=c.ui_address%>'" nodeID="<%=c.name%>">
-	<td class="node_list_check">
-	  <input class="node_list_check" type="checkbox" name="clusterid-<%=c.name%>" res_id="<%=c.name%>">
-	</td>
-	<td class="node_list_sprite">
-	  <div class="check sprites"></div>
-	</td>
-	<td nowrap class="resource_name">
-	  <%= c.name %>
-	</td>
-	<td class="resource_type">
-	  <%= c.num_nodes %>
-	</td>
-	<td style="display:none">
-	  <div class="arrow sprites"></div>
-	</td>
-      </tr>
-    <% end %>
+    <tr>
+      <td>{{ Pcs.clusterController.cluster_list.length }}</td>
+      <td style="padding-left: 5px;">{{ Pcs.clusterController.num_ok }}</td>
+      <td style="padding-left: 5px;">{{ Pcs.clusterController.num_warning }}</td>
+      <td style="padding-left: 5px;">{{ Pcs.clusterController.num_error }}</td>
+      <td style="padding-left: 5px;">{{ Pcs.clusterController.num_unknown }}</td>
+    </tr>
+  </table>
+  <table cellpadding=0 cellspacing=0 id="clusters_list">
+    <tr>
+      {{#if Pcs.clusterController.cluster_list.length}}
+      <th></th><th></th><th>NAME</th><th style="padding-right: 1em;">NODES</th><th>RESOURCES</th><th style="padding-right: 16px;"></th>
+      {{/if}}
+    </tr>
+    {{#each Pcs.clusterController.cluster_list }}
+    <tr onmouseover="hover_over(this);" onmouseout="hover_out(this);" onclick="Pcs.clusterController.update_cur_cluster($(this).attr('nodeID'));" {{bind-attr nodeID="this.name"}}>
+    <td class="node_list_check">
+      <input class="node_list_check" type="checkbox" {{bind-attr name="input_name"}} {{bind-attr res_id="name"}}>
+    </td>
+    <td>
+      {{{status_icon}}}
+    </td>
+    <td nowrap class="resource_name">
+      {{#if forbidden}}
+        {{name}}
+      {{else}}
+        <a {{bind-attr href=url_link}}>{{name}}</a>
+      {{/if}}
+    </td>
+    <td nowrap class="resource_type">
+      {{#if forbidden}}
+        unknown
+      {{else}}
+        {{nodes.length}}
+        {{#if nodes_failed}}
+        | <div style="display: inline-block;" title="Issue(s) found"><span style="font-weight: bold; color: red">{{nodes_failed}}</span></div>
+        {{/if}}
+      {{/if}}
+    </td>
+    <td nowrap class="resource_type">
+      {{#if status_unknown}}
+        unknown
+      {{else}}
+        {{resource_list.length}}
+        {{#if resources_failed}}
+      | <div style="display: inline-block;" title="Issue(s) found"><span style="font-weight: bold; color: red">{{resources_failed}}</span></div>
+        {{/if}}
+      {{/if}}
+    </td>
+    <td>
+      <div style="display:none" class="arrow sprites"></div>
+    </td>
+    </tr>
+    {{/each}}
   </table>
 </form>
diff --git a/pcsd/views/_configure.erb b/pcsd/views/_configure.erb
index 66a412a..9331621 100644
--- a/pcsd/views/_configure.erb
+++ b/pcsd/views/_configure.erb
@@ -1,39 +1,42 @@
-<tr id="configure_title_row" {{bind-attr style="Pcs.configure_page"}}><td id="page_header" colspan=3>
-    <table id="config_options"><tr>
-	<td>CLUSTER PROPERTIES</td>
-	<!--
-	    <td class="configure-general"><a href="/configure/general">General</a></td>
-	    <td class="configure-pacemaker"><a href="/configure/pacemaker">Pacemaker</a></td>
-	    <td class="configure-network"><a href="/configure/network">Network</a></td>
-	    <td class="configure-resource"><a href="/configure/resource">Resource</a></td>
-	    <td class="configure-logging"><a href="/configure/logging">Logging</a></td>
--->
-    </tr></table>
-  </tr>
-  <tr id="configure_header_row" {{bind-attr style="Pcs.configure_page"}}><td colspan=3>
-      <hr>
-  </td></tr>
-  <tr id="configure_list_row" {{bind-attr style="Pcs.configure_page"}}>
-    <td id="config" colspan=3>
-      <form>
-	<% @config_options.each { |page, options| %>
-	  <table>
-	    <% options.each {|co| %>
-              <tr title="<%= h(co.desc)%>">
-		<td class="label"><%= co.name %>:</td>
-		<td><%= co.html %><span class="units"><%= co.units %></span></td>
-	      </tr>
-	    <% } %>
-	  </table>
-	<br>
-      <% } %> 
-	<% if @config_options.length != 0 %>
-	  <input type=submit style="margin-left:20px;" class="text_field" onclick="update_cluster_settings($(this).parent('form')); return false;" value="Apply Changes">
+<tr id="configure_title_row" {{bind-attr style="Pcs.configure_page"}}>
+  <td id="page_header" colspan=3>
+    <table id="config_options">
+      <tr>
+        <td>CLUSTER PROPERTIES</td>
+      </tr>
+    </table>
+  </td>
+</tr>
+<tr id="configure_header_row" {{bind-attr style="Pcs.configure_page"}}>
+  <td colspan=3>
+    <hr>
+  </td>
+</tr>
+<tr id="configure_list_row" {{bind-attr style="Pcs.configure_page"}}>
+  <td id="config" colspan=3>
+    <form>
+      <% @config_options.each { |page, options| %>
+        <table>
+          <% options.each { |co| %>
+            <tr title="<%= h(co.desc) %>">
+              <td class="label"><%= co.name %>:</td>
+              <td><%= co.html %><span class="units"><%= co.units %></span></td>
+            </tr>
+          <% } %>
+        </table>
+      <br>
+      <% } %>
+      <% if @config_options.length != 0 %>
+        <input type="submit" style="margin-left:20px;" class="text_field"
+          onclick="update_cluster_settings($(this).parent('form')); return false;"
+          value="Apply Changes"
+        >
       <% end %>
-      </form>
-    </td>
-  </tr>
-  <tr><td colspan=3>
-      <hr>
+    </form>
+  </td>
+</tr>
+<tr>
+  <td colspan=3>
+    <hr>
   </td>
 </tr>
diff --git a/pcsd/views/_dialogs.erb b/pcsd/views/_dialogs.erb
new file mode 100644
index 0000000..02d8eab
--- /dev/null
+++ b/pcsd/views/_dialogs.erb
@@ -0,0 +1,38 @@
+<div id="auth_nodes" style="display:none;">
+  <form id="auth_nodes_form">
+    Enter password for user 'hacluster' to authenticate nodes.<br>
+    Nodes to authenticate:
+    <table class="err_msg_table" style="width: 100%">
+      <tr><td align=center style="color: red" colspan="2"><span id="auth_failed_error_msg" style="display:none;">Authentication on some nodes failed.</span></td></tr>
+    </table>
+    <table id="auth_nodes_list">
+    </table>
+    <div id="same_pass"><label><input type="checkbox" name="all" onchange="if ($(this).is(':checked')) {$('#auth_nodes_list').find('input:password').each(function(){$(this).hide()}); $('#pass_for_all').show();} else {$('#auth_nodes_list').find('input:password').each(function(){$(this).show()}); $('#pass_for_all').hide();}"> Use same password for all nodes:</label>
+      <input type="password" name="pass-all" id="pass_for_all" style="display: none;"></div>
+  </form>
+</div>
+
+<div id="dialog_verify_remove_clusters" style="display: none;">
+  <p style="font-size:12px;">Are you sure you want to remove the following cluster(s) from the GUI? (This only removes the cluster from the GUI, it does not stop the cluster from running.)</p>
+  <span class="name_list"></span>
+</div>
+
+<div id="dialog_verify_remove_nodes" style="display:none;">
+  <p style="font-size:12px;">Are you sure you want to remove the following nodes(s)?</p>
+  <span class="name_list"></span>
+  {{#if Pcs.is_cman_with_udpu_transport}}
+    <p style="color: orange">This is a CMAN cluster with UDPU transport, cluster restart is required to apply node removal.</p>
+  {{/if}}
+</div>
+
+<div id="dialog_verify_remove_resources" style="display: none;">
+  <p style="font-size:12px;">Are you sure you want to remove the following resource(s)?</p>
+  <span class="name_list"></span>
+  <input type="checkbox" name="force">
+  Enforce removal (Remove the resources without stopping them first.)
+</div>
+
+<div id="dialog_verify_remove_acl_roles" style="display:none;">
+  <p style="font-size:12px;">Are you sure you want to remove the following ACL role(s)?</p>
+  <span class="name_list"></span>
+</div>
diff --git a/pcsd/views/_permissions_cluster.erb b/pcsd/views/_permissions_cluster.erb
new file mode 100644
index 0000000..4048366
--- /dev/null
+++ b/pcsd/views/_permissions_cluster.erb
@@ -0,0 +1,120 @@
+<div id="node_info_header">
+  <div id="node_info_header_title">
+    PERMISSIONS FOR CLUSTER <%= h(@cluster_name).upcase %>
+  </div>
+</div>
+<div id="node_sub_info">
+  <script type="text/javascript">
+    if (typeof permissions_dependencies === "undefined") {
+      var permissions_dependencies = {};
+    }
+  </script>
+  <% if @error %>
+    <script type="text/javascript">
+      permissions_dependencies["<%= h(@cluster_name) %>"] = {};
+    </script>
+    <b>Error: </b><span style="color:red;"><%= h(@error) %></span>
+    <div style="padding-top:20px;">
+      <button type="button"
+        onclick="
+          show_loading_screen();
+          permissions_load_cluster('<%= h(@cluster_name)%>', hide_loading_screen);
+        "
+      >Refresh</button>
+    </div>
+  <% else %>
+    <script type="text/javascript">
+      permissions_dependencies["<%= h(@cluster_name) %>"] = <%= @permissions_dependencies.to_json %>;
+    </script>
+    <form method="post" action="/permissions_save/">
+      <input type="hidden" name="cluster_name" value="<%= h(@cluster_name) %>">
+      <table class="datatable">
+        <tr>
+          <th>Name</th>
+          <th>Type</th>
+          <% @permission_types.each { |type| %>
+            <th title="<%= h(type['description']) %>"><%= h(type['label']) %></th>
+          <% } %>
+          <th>Remove</th>
+        </tr>
+        <% @users_permissions.each_with_index { |user, index| %>
+          <tr>
+            <td>
+              <input type="hidden" name="permissions[<%= index %>][name]"
+                value="<%= h(user['name']) %>"
+              >
+              <%= h(user['name']) %>
+            </td>
+            <td>
+              <input type="hidden" name="permissions[<%= index%>][type]"
+                value="<%= h(user['type']) %>"
+              >
+              <%= h(user['type']) %>
+            </td>
+            <% @permission_types.each { |perm| %>
+              <td title="<%= h(perm['label']) %>: <%= h(perm['description']) %>">
+                <input type="checkbox" value="1"
+                  name="permissions[<%= index %>][allow][<%= h(perm['code']) %>]"
+                  <% if user['allow'].include?(perm['code']) %>
+                    checked="checked"
+                  <% end %>
+                  onchange="
+                    permissions_fix_dependent_checkboxes(this);
+                    permissions_cluster_dirty_flag('<%= h(@cluster_name) %>', true);
+                  "
+                >
+              </td>
+            <% } %>
+            <td style="text-align:center;">
+              <a class="remove" href="#" onclick="permission_remove_row(this);">X</a>
+            </td>
+          </tr>
+        <% } %>
+        <tr>
+          <td>
+            <input type="text" name="permissions_new[name]">
+          </td>
+          <td>
+            <select name="permissions_new[type]">
+              <% @user_types.each { |type| %>
+                <option value="<%= h(type['code']) %>"><%= h(type['label']) %></option>
+              <% } %>
+            </select>
+          </td>
+          <% @permission_types.each { |perm| %>
+            <td title="<%= h(perm['label']) %>: <%= h(perm['description']) %>">
+              <input type="checkbox" value="1"
+                name="permissions_new[allow][<%= h(perm['code']) %>]"
+                onchange="permissions_fix_dependent_checkboxes(this);"
+              >
+            </td>
+          <% } %>
+          <td style="text-align:center;">
+            <button type="button"
+              onclick="
+                this.disabled = true;
+                permissions_add_row($(this).parents('tr').first());
+                this.disabled = false;
+              "
+            >Add</button>
+          </td>
+        </tr>
+      </table>
+      <div style="padding-top:20px;">
+        <button type="button"
+          onclick="
+            show_loading_screen();
+            permissions_load_cluster('<%= h(@cluster_name) %>', hide_loading_screen);
+          "
+        >Refresh</button>
+        <button type="button"
+          onclick="
+            this.disabled = true;
+            permissions_save_cluster($(this).parents('form').first());
+            this.disabled = false;
+          "
+        >Apply Changes</button>
+      </div>
+    </form>
+  <% end %>
+</div>
diff --git a/pcsd/views/_resource.erb b/pcsd/views/_resource.erb
index 8de0c57..7e4cf39 100644
--- a/pcsd/views/_resource.erb
+++ b/pcsd/views/_resource.erb
@@ -7,9 +7,9 @@
     <td id="remove_add" class="borderbottom">
       <div class="x sprites"></div><div class=link"> 
 	<% if @myView == "resource" %>
-	  <a href="#" onclick='verify_remove("resource");return false;'>
+	  <a href="#" onclick='verify_remove_resources(); return false;'>
 	<% else %>
-	  <a href="#" onclick='verify_remove("stonith", null, "Remove device(s)", "Fence Device Removal");return false;'>
+	  <a href="#" onclick='verify_remove_fence_devices(); return false;'>
 	<% end %>
       Remove</a>    </div>
   <div class="plus sprites"></div><div class="link"> 
@@ -32,315 +32,16 @@
     <td id="<%=@myView%>_list" class="node_list">
       <%= erb :_resource_list %>
     </td>
-	<td id="node_info" colspan=2>
-	  <div id="<%=@myView%>_info_div" {{bindAttr style="Pcs.resourcesController.cur_resource_info_style"}}>
-	  <div id="test">
-	    <div id="node_info_header">
-	      <div id="<%=@myView%>_info_header_title" class="node_info_header_title" >Edit <%= @pageName %> </div>
-	      <div id="node_info_header_title_name">
-		{{Pcs.resourcesController.cur_resource.name}}
-	      </div>
-	      <div>
-{{#if Pcs.resourcesController.cur_resource.group}}
-  ({{Pcs.resourcesController.cur_resource.group}})
-{{/if}}
-	      </div>
-	    </div>
-
-	    <div id="node_sub_info">
-	      <table>
-		<tr>
-		  <td><input disabled style="margin-right: 50px;" type="text" {{bind-attr value="Pcs.resourcesController.cur_resource.name"}} size="35" class="text_field"></td>
-		  <td><div style="margin-right: 8px;" class="check sprites"></div></td>
-		  <td>
-		  {{#if Pcs.resourcesController.cur_resource.failed}}
-		    <div id="res_status" class="status-offline">Failed</div>
-                  {{else}}
-		  {{#if Pcs.resourcesController.cur_resource.active}}
-		    <div id="res_status" class="status">Running
-		      {{#if Pcs.resourcesController.cur_resource.disabled}}
-		        (Disabled)
-		      {{/if}}
-		    </div>
-		  {{else}}
-		    <div id="res_status" class="status-offline">
-		      Inactive
-		      {{#if Pcs.resourcesController.cur_resource.disabled}}
-		        (Disabled)
-		      {{/if}}
-		     </div>
-		  {{/if}}
-		  {{/if}}
-		  </td>
-		</tr>
-	      </table>
-	    </div>
-
-	    <div id="node_options_buttons">
-	      <% if @myView == "resource" %>
-		<div class="checkdark sprites" style="float: left"></div>
-		<div id="resource_start_link" class="link">Enable</div>
-		<div class="cancel sprites" style="float: left"></div>
-		<div id="resource_stop_link" class="link">Disable</div>
-		<div class="restart sprites" style="float: left"></div>
-		<div id="resource_cleanup_link" class="link">Cleanup</div>
-		<div class="xdark sprites" style="float: left"></div>
-		<div id="resource_delete_link" class="link">Remove</div>
-	      <% else %>
-		<div class="xdark sprites" style="float: left"></div>
-		<div id="stonith_delete_link" class="link">Remove</div>
-		<div class="restart sprites" style="float: left"></div>
-		<div id="stonith_cleanup_link" class="link">Cleanup</div>
-	      <% end %>
-	      <!--
-	      <div class="move sprites" style="float: left"></div>
-	      <div id="resource_move_link" class="link">Move</div>
-	      <div class="history sprites" style="float: left"></div>
-	      <div id="resource_history_link" class="link">History</div>
-	      -->
-	    </div>
-
-	    <div id="node_details">
-	      <table style="margin-bottom:25px;"><tr>
-		  <td nowrap><div class="bold">Current Location:</div></td>
-		  <td><div id="cur_res_loc" class="reg">
-			{{Pcs.resourcesController.cur_resource.node_list}}
-		  </div> </td>
-		</tr>
-	      </table>
-	      <% if @myView == "resource" %>
-	      <table style="clear:left;float:left">
-		<tr><td style="display: block;" onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="location_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Location Preferences ({{#if Pcs.resourcesController.cur_resource.location_constraints}}{{Pcs.resourcesController.cur_resource.location_constraints.length}}{{else}}0{{/if}})</td></tr>
-		<tr><td>
-		    <div id="locationdep">
-		      <table class="datatable">
-			<tr><th>Node/Rule</th><th>Score</th><th>Remove</th></tr>
-			{{#each Pcs.resourcesController.cur_resource.location_constraints}}
-			<tr>
-			{{#if rule_string}}
-			  <td style="white-space: normal;">{{rule_string}}</td>
-			  <td>{{#if score-attribute}}{{score-attribute}}{{else}}{{score}}{{/if}}</td>
-			  <td {{bind-attr rule_id="id"}} style="text-align:center">
-			    {{#unless temp}}
-				<a onclick="remove_constraint_rule($(this).parent().attr('rule_id'));return false;" href="#" class="remove">X</a>
-			    {{/unless}}
-			  </td>
-			{{else}}
-			  <td>{{node}}</td>
-			  <td>{{score}}</td>
-			  <td {{bind-attr constraint_id="id"}} style="text-align:center">
-			    {{#unless temp}}
-				<a onclick="remove_constraint($(this).parent().attr('constraint_id'));return false;" href="#" class="remove">X</a>
-			    {{/unless}}
-			  </td>
-			{{/if}}
-			</tr>
-		        {{/each}}
-                        {{#unless Pcs.resourcesController.cur_resource.location_constraints}}
-			<tr><td style="color: gray;">NONE</td><td></td><td></td></tr>
-		        {{/unless}}
-			<tr id="new_res_loc">
-			  <td><input type="text" name="node_id"></td>
-			  <td><input type="text" name="score" size="5"></td>
-			  <td><button type="button" onclick="add_constraint('#new_res_loc','loc', false);" name="add">Add</button></td>
-			</tr>
-		      </table>
-		    </div>
-		  </td>
-		</tr>
-	      </table>
-	      <table style="clear:left;float:left;">
-		<tr><td style="display: block;" onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="ordering_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Ordering Preferences ({{#if Pcs.resourcesController.cur_resource.ordering_constraints.length}}{{Pcs.resourcesController.cur_resource.ordering_constraints.length}}{{else}}0{{/if}})</td></tr>
-		<tr><td>
-		    <div id="locationdep">
-		      <table class="datatable">
-			<tr><th>Resource</th><th>Action</th><th>Before/After</th><th>Action</th><th>Score</th><th>Remove</th></tr>
-		        {{#each Pcs.resourcesController.cur_resource.ordering_constraints}}	
-			  <tr><td>{{other_rsc}}</td>
-			    {{#if before}}
-			      <td>{{#if first-action}}{{first-action}}s{{else}}starts{{/if}}</td>
-			      <td>before {{then}}</td>
-			      <td>{{#if then-action}}{{then-action}}s{{else}}starts{{/if}}</td>
-			    {{else}}
-			      <td>{{#if then-action}}{{then-action}}s{{else}}starts{{/if}}</td>
-			      <td>after {{first}}</td>
-			      <td>{{#if first-action}}{{first-action}}s{{else}}starts{{/if}}</td>
-			    {{/if}}
-			    <td>{{score}}</td>
-			    <td {{bind-attr constraint_id="id"}} style="text-align:center">
-				  <a onclick="remove_constraint($(this).parent().attr('constraint_id'));return false;" href="#" class="remove">X</a>
-			    </td>
-			  </tr>
- 			{{/each}}
-			{{#unless Pcs.resourcesController.cur_resource.ordering_constraints}}
-			  <tr><td style="color: gray;">NONE</td><td></td><td></td><td></td><td></td><td></td></tr>
-			{{/unless}}
-			<tr id="new_res_orc">
-			  <td><input type="text" name="target_res_id"></td>
-			  <td>
-			    <select name="target_action">
-			      <option value="start">starts</option>
-			      <option value="promote">promotes</option>
-			      <option value="demote">demotes</option>
-			      <option value="stop">stops</option>
-			    </select>
-			  </td>
-			  <td>
-			    <select name="order">
-			      <option value="after">after</option>
-			      <option value="before">before</option>
-			    </select>
-			    {{Pcs.resourcesController.cur_resource.name}}
-			  </td>
-			  <td>
-			    <select name="res_action">
-			      <option value="start">starts</option>
-			      <option value="promote">promotes</option>
-			      <option value="demote">demotes</option>
-			      <option value="stop">stops</option>
-			    </select>
-			  </td>
-			  <td><input type="text" name="score" size="5"></td>
-			  <td><button type="button" onclick="add_constraint('#new_res_orc','ord', false);" name="add">Add</button></td>
-			</tr>
-		      </table>
-		    </div>
-		  </td>
-		</tr>
-	      </table>
-        <table style="clear:left;float:left;">
-          <tr><td style="display: block;" onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="ordering_set_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Ordering Set Preferences ({{#if Pcs.resourcesController.cur_resource.ordering_set_constraints.length}}{{Pcs.resourcesController.cur_resource.ordering_set_constraints.length}}{{else}}0{{/if}})</td></tr>
-          <tr><td>
-            <div id="locationdep">
-              <table class="datatable">
-                <tr>
-                  <th>Preference Name/Set of Resources</th>
-                  <th style="text-align: center;">Remove</th>
-                </tr>
-                {{#each Pcs.resourcesController.cur_resource.ordering_set_constraints}}
-                <tr>
-                  <td>{{id}}</td>
-                  <td {{bind-attr constraint_id="id"}} style="text-align:center;">
-                    <a onclick="remove_constraint($(this).parent().attr('constraint_id')); return false;" href="#" class="remove">X</a>
-                  </td>
-                </tr>
-                {{#each sets}}
-                <tr>
-                  <td style="padding-left:2em;">Set:{{#each rsc in resources}} {{rsc}}{{/each}}</td>
-                  <td></td>
-                </tr>
-                {{/each}}
-                {{/each}}
-                {{#unless Pcs.resourcesController.cur_resource.ordering_set_constraints}}
-                  <tr><td style="color: gray;">NONE</td><td></td></tr>
-                {{/unless}}
-                <tr id="new_res_orc_set" title="Enter the resources you want to be in one set into the 'Set' field separated by space.
-Use the 'New Set' button to create more sets.
-Use the 'Add' button to submit the form.">
-                  <td>Set: <input type="text" name="resource_ids[]"></td>
-                  <td style="vertical-align: bottom;">
-                    <button type="button" onclick="new_constraint_set_row('#new_res_orc_set');" name="new-row">New Set</button>
-                    <button type="button" onclick="add_constraint_set('#new_res_orc_set', 'ord', false);" name="add">Add</button>
-                  </td>
-                </tr>
-              </table>
-            </div>
-          </td></tr>
-        </table>
-	      <table style="clear:left;float:left">
-		<tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="colocation_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Colocation Preferences ({{#if Pcs.resourcesController.cur_resource.colocation_constraints.length}}{{Pcs.resourcesController.cur_resource.colocation_constraints.length}}{{else}}0{{/if}})</td></tr>
-		<tr><td>
-		    <div id="locationdep">
-		      <table class="datatable">
-			<tr><th>Resource</th><th>Together/Apart</th><th>Score</th><th>Remove</th></tr>
-			{{#each Pcs.resourcesController.cur_resource.colocation_constraints}}
-			  <tr><td>{{other_rsc}}</td>
-			    <td>{{together}}</td>
-			    <td>{{score}}</td>
-			    <td {{bind-attr constraint_id="id"}} style="text-align:center">
-				  <a onclick="remove_constraint($(this).parent().attr('constraint_id'));return false;" href="#" class="remove">X</a>
-			    </td>
-			  </tr>
-                        {{/each}}
- 			{{#unless Pcs.resourcesController.cur_resource.colocation_constraints}}
-			  <tr><td style="color: gray;">NONE</td><td></td><td></td><td></td></tr>
- 			{{/unless}}
-			<tr id="new_res_col">
-			  <td><input type="text" name="target_res_id"></td>
-			  <td><select name="colocate"><option value="together">Together<option value="apart">Apart</select></td>
-			  <td><input type="text" name="score" size="5"></td>
-			  <td><button type="button" onclick="add_constraint('#new_res_col','col', false);" name="add">Add</button></td>
-			</tr>
-
-		      </table>
-		    </div>
-		  </td>
-		</tr>
-	      </table>
-	      <table style="clear:left;float:left">
-		<tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="meta_attr"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Meta Attributes ({{#if Pcs.resourcesController.cur_resource.meta_attr.length}}{{Pcs.resourcesController.cur_resource.meta_attr.length}}{{else}}0{{/if}})</td></tr>
-		<tr><td>
-		    <div id="locationdep">
-		      <table class="datatable">
-			<tr><th>Meta Attribute</th><th>Value</th><th>Remove</th></tr>
-			{{#each Pcs.resourcesController.cur_resource.meta_attr}}
-			<tr><td>{{this.key}}</td>
-			    <td>{{this.value}}</td>
-			    <td {{bind-attr meta_attr_key="this.key"}} {{bind-attr meta_attr_res="this.parent"}} style="text-align:center">
-				  <a onclick="remove_meta_attr($(this).parent());return false;" href="#" class="remove">X</a>
-			    </td>
-			  </tr>
-                        {{/each}}
- 			{{#unless Pcs.resourcesController.cur_resource.meta_attr}}
-			  <tr><td style="color: gray;">NONE</td><td></td><td></td></tr>
- 			{{/unless}}
-			<tr id="new_meta_col">
-			  <td><input type="text" name="new_meta_key" size="20"></td>
-			  <td><input type="text" name="new_meta_value" size="20"></td>
-			  <td><button type="button" onclick="add_meta_attr('#new_meta_col');" name="add">Add</button></td>
-			</tr>
-
-		      </table>
-		    </div>
-		  </td>
-		</tr>
-	      </table>
-          <br style="clear:left;">
-	  <% end %>
-	</div>
-
-	    <table style="clear:left;float:left;margin-top:25px;">
-	      <% if @myView == "resource" and false %>
-	      <tr>
-		<td class="bold">Class:</td>
-		<td class="reg">
-		{{Pcs.resourcesController.cur_resource.res_class}}
-		</td>
-		<td style="padding-left:10px;" class="bold">Provider:</td>
-		<td class="reg">
-		{{Pcs.resourcesController.cur_resource.res_provider}}
-		</td>
-	      </tr>
-	      <% end %>
-	      <tr>
-		<td class="bold">Type:</td>
-		<td class="reg">
-		{{Pcs.resourcesController.cur_resource.agentname}}
-		</td>
-	      </tr>
-	    </table>
-
-	    <% if @myView == "resource" %>
-	      <div style="clear:left;" id="resource_agent_form"></div>
-	    <% else %>
-	      <div style="clear:left;" id="stonith_agent_form"></div>
-	    <% end %>
-	    {{#if Pcs.resourcesController.no_resources}}
-	      <span id="node_info_header"><span id="node_info_header_title">No resources have been configured</span></span>
-	     {{/if}}
-	    </div>
-	  </td>
-	</tr>
+  <td id="node_info" colspan=2>
+    <div id="<%=@myView%>_info_div">
+    <% if @myView == "resource" %>
+      {{resource-edit resource=Pcs.resourcesContainer.cur_resource page_name="Resource" old_pcsd=Pcs.resourcesContainer.is_version_1 utilization_support=Pcs.nodesController.utilization_support}}
+    <% else %>
+      {{resource-edit resource=Pcs.resourcesContainer.cur_fence page_name="Fence device" stonith=1 old_pcsd=Pcs.resourcesContainer.is_version_1}}
+    <% end %>
+    </div>
+    </td>
+  </tr>
     <% if @myView == "resource" %>
       </div>
     </table>
@@ -405,8 +106,4 @@ Use the 'Add' button to submit the form.">
 	<input id="resources_to_add_to_group"  type=hidden name="resources" value="">
       </form>
     </div>
-    <div id="verify_remove" style="display: none;">
-      <p style="font-size:12px;">Are you sure you want to remove the following resource(s)?</p>
-      <span id="resource_to_remove"></span>
-    </div>
     <% end %>
diff --git a/pcsd/views/_resource_list.erb b/pcsd/views/_resource_list.erb
index d6938a2..7dd2699 100644
--- a/pcsd/views/_resource_list.erb
+++ b/pcsd/views/_resource_list.erb
@@ -1,38 +1,5 @@
-    <table cellpadding=0 cellspacing=0 style="float:left;">
-      <tr>
-      <th><input type="checkbox" onchange="checkBoxToggle(this,false)"></th><th></th><th>NAME </th><th>TYPE</th><th></th>
-      </tr>
-	{{#each Pcs.resourcesController}}
-	<% if @myView == "resource" %>
-            {{#unless stonith}}
-	<% else %>
-            {{#if stonith}}
-        <% end %>
-	    <tr {{bind-attr onmouseover="onmouseover"}} {{bind-attr onmouseout="onmouseout"}} {{bind-attr class="trclass"}} {{bind-attr nodeID="name"}} onclick='Pcs.resourcesController.load_<%=@myView%>(this);'>
-
-	    <td class="node_list_check">
-            {{view Ember.Checkbox checkedBinding="checked" class="node_list_check"}}
-	    </td>
-	    <td class="node_list_sprite">
-	      <div class="check sprites"></div>
-	    </td>
-	    <td class="resource_name" {{bind-attr style="resource_name_style"}} nowrap>
-		{{full_name}}
-		{{#if group}}
-		({{group}})
-		{{/if}}
-	    </td>
-	    <td class="resource_type">
-		{{agentname}}
-	    </td>
-	    <td>
-		<div {{bind-attr style="showArrow"}} class="arrow sprites"></div>
-	    </td>
-	    </tr>
-	<% if @myView == "resource" %>
-            {{/unless}}
-	<% else %>
-            {{/if}}
-        <% end %>
-	{{/each}}
-    </table>
+<% if @myView == "resource" %>
+  {{resource-tree-view elements=Pcs.resourcesContainer.resource_list}}
+<% else %>
+  {{resource-tree-view elements=Pcs.resourcesContainer.fence_list}}
+<% end %>
diff --git a/pcsd/views/configure.erb b/pcsd/views/configure.erb
deleted file mode 100644
index 87d7889..0000000
--- a/pcsd/views/configure.erb
+++ /dev/null
@@ -1,39 +0,0 @@
-  <table id="configure_main">
-    <tr><td id="page_header" colspan=3>
-	<table id="config_options"><tr>
-	    <td>CLUSTER PROPERTIES</td>
-	    <td class="<%= (params[:page] == "general" or params[:page] == nil) ? "selected" : "" %>"><a href="/configure/general">General</a></td>
-	    <td class="<%= params[:page] == "pacemaker" ? "selected" : "" %>"><a href="/configure/pacemaker">Pacemaker</a></td>
-	    <td class="<%= params[:page] == "network" ? "selected" : "" %>"><a href="/configure/network">Network</a></td>
-	    <td class="<%= params[:page] == "resource" ? "selected" : "" %>"><a href="/configure/resource">Resource</a></td>
-	    <td class="<%= params[:page] == "logging" ? "selected" : "" %>"><a href="/configure/logging">Logging</a></td>
-	</tr></table>
-    </tr>
-    <tr><td colspan=3>
-	<hr>
-    </td></tr>
-    <tr>
-      <td id="config" colspan=3>
-	<form method=POST>
-	  <% @config_options.each { |co_group| %>
-	    <table>
-	      <% co_group.each { |co| %>
-		<tr>
-		  <td class="label"><%= co.name %>:</td>
-		  <td><%= co.html %><span class="units"><%= co.units %></span></td>
-		</tr>
-	      <% } %>
-	    </table>
-	    <br>
-	  <% } %> 
-	  <% if @config_options.length != 0 %>
-	    <input type=submit style="margin-left:20px;" class="text_field" value="Apply Changes">
-	  <% end %>
-	</form>
-      </td>
-    </tr>
-    <tr><td colspan=3>
-	<hr>
-    </td></tr>
-  </div>
-</table>
diff --git a/pcsd/views/fenceagentform.erb b/pcsd/views/fenceagentform.erb
index fc47423..f54a8de 100644
--- a/pcsd/views/fenceagentform.erb
+++ b/pcsd/views/fenceagentform.erb
@@ -4,7 +4,7 @@
     <table style="clear:left; float:left; margin-top: 25px;">
       <tr>
         <td><div class="bold">Description:</div></td>
-        <td><span class="reg" style="float:left;"><%=h(@fenceagent.short_desc)%> </span> <span title="<%=h(@fenceagent.long_desc)%>" onclick="$(this).closest('table').find('.long_desc_div').toggle();" class="infoicon sprites" style="margin-top:2px;"></span></td>
+        <td><span class="reg" style="float:left;"><%=h(@fenceagent.short_desc)%> </span> <span title="<%=nl2br(h(@fenceagent.long_desc))%>" onclick="$(this).closest('table').find('.long_desc_div').toggle();" class="infoicon sprites" style="margin-top:2px;"></span></td>
       </tr>
       <tr>
         <td></td>
@@ -32,7 +32,7 @@
 	     <%= name %>
 	  </td>
 	  <td>
-	    <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.options[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=@cur_resource.options[name] if (@existing_resource and @cur_resource and @cur_resource.options)%>" size="35" class="text_field">
+	    <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.instance_attr[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=h(@cur_resource.instance_attr[name].value) if (@existing_resource and @cur_resource and @cur_resource.instance_attr[name])%>" size="35" class="text_field">
 	  </td>
 	</tr>
       <% } %>
@@ -48,7 +48,7 @@
 	     <%= name %>
 	  </td>
 	  <td>
-            <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.options[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=@cur_resource.options[name] if @existing_resource and @cur_resource and @cur_resource.options%>" size="35" class="text_field">
+            <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.instance_attr[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=h(@cur_resource.instance_attr[name].value) if @existing_resource and @cur_resource and @cur_resource.instance_attr[name]%>" size="35" class="text_field">
 	  </td>
 	</tr>
       <% } %>
@@ -64,7 +64,7 @@
          <%= name %>
       </td>
       <td>
-            <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.options[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=@cur_resource.options[name] if @existing_resource and @cur_resource and @cur_resource.options%>" size="35" class="text_field">
+            <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.instance_attr[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=h(@cur_resource.instance_attr[name].value) if @existing_resource and @cur_resource and @cur_resource.instance_attr[name]%>" size="35" class="text_field">
       </td>
     </tr>
       <% } %>
diff --git a/pcsd/views/main.erb b/pcsd/views/main.erb
index 3c89117..4e6aff3 100644
--- a/pcsd/views/main.erb
+++ b/pcsd/views/main.erb
@@ -15,6 +15,17 @@
   <script src="/js/ember-1.4.0.js"></script>
   <script src="/js/pcsd.js"></script>
   <script type="text/javascript">
+    $(function () { // use jquery html tooltips
+      $.widget("ui.tooltip", $.ui.tooltip, {
+        options: {
+          content: function () {
+            return $(this).prop('title');
+          },
+          track: true
+        }
+      });
+      $(document).tooltip();
+    });
     var origMainTRHeight = 0;
     function resizeDialog() {
       $('#add_resource').dialog('option','position','center');
@@ -52,91 +63,587 @@
 </head>
 <html>
   <body>
-  <script type="text/x-handlebars">
-    <div id="wrapper">
-  <div id="header">
-    <div id="topmenu">
-    <table id="menu_controls">
-	<tr><td rowspan=2 width="170px" style="vertical-align: top;">
-	    <div id="logo">
-	      <img width="170" height="40" src="/images/HAM-logo.png" onclick="if (Pcs) { Pcs.update(); }">
-	    </div>
-	  </td>
-	</tr>
-	<tr><td>
-	    <div id="menu_list">
-	      <table>
-		<tr>
-		  <td colspan=3 style="padding-bottom:8px;">
-		      <% if @clusters != nil %>
-		      <div style="float:left;width:100%">
-			<div class="menuheader">
-			  <div id="dropdownr" class="label"  style="white-space:nowrap" onclick="window.location='/manage'">
-			    <table style="background: #000000">
-			    <tr><td>
-			    <%=@manage ? "Select Cluster" : "Cluster: " + @cluster_name%>
-			    </td>
-			    <td><div style="float:right;" class="downarrow sprites"></div></td>
-			    </tr></table>
-			  </div>
-			  <div style="width:100%;" class="menu">
-			    <% if not @manage %>
-			      <a href="/manage" class="menu-item">Manage Clusters  </a>
-			    <% end %>
-			  <% @clusters.each { |c| %>
-			    <a href="<%=c.ui_address%>" class="menu-item"><%=c.name%>  </a>
-			  <% } %>
-			  </div>
-			</div>
-		      </div>
-		      <% end %>
-		  </td>
-		  <td colspan=4 style="padding-bottom:8px;">
-		    <% if session[:username] %>
-		      <div style="float:right">
-			<div class="menuheader">
-			<div id="dropdownr" class="label"><%=session[:username]%><div style="float:right;" class="downarrow sprites"></div></div>
-			<div style="width:100%" class="menu">
-			  <a href="/logout" class="menu-item">Logout  </a>
-			</div>
-			</div>
-			<!--		      <div id="dropdownr">
-			  <%=session[:username]%><div style="float:right;" class="downarrow sprites"></div>
-			</div> -->
-		      </div>
-		    <% end %>
+  <script type="text/x-handlebars" data-template-name="components/list-view">
+  <div style="width: 300px;" id="node_list" {{bind-attr class=element-class}}>
+    <table cellpadding="0" cellspacing="0" class="list-view" style="width: 100%; border: none;">
+      <tr>
+        <th style="width:27px;"><input type="checkbox" onchange="checkBoxToggle(this,false)"></th><th style="width:47px;"></th><th>{{table-title}}</th><th style="width:18px;"></th>
+      </tr>
+      {{#each element in elements}}
+      <tr class="list-view-element" onmouseover="$(this).addClass('mouse_on_row');" onmouseout="$(this).removeClass('mouse_on_row');" onclick="list_view_select(get_list_view_element_id(this));" {{bind-attr id=element.name}}>
+        <td class="node_list_check">{{view Ember.Checkbox checkedBinding="checked" class="node_list_check"}}</td>
+        <td class="node_list_sprite">{{#if default-icon}}{{{default-icon}}}{{else}}{{#if element.status_icon}}{{{element.status_icon}}}{{else}}<div class="check sprites"></div>{{/if}}{{/if}}</td>
+        <td class="node_name" nowrap {{bind-attr style=element.style}}>{{element.name}}</td>
+        <td>
+          <div style="display: none;" class="arrow sprites"></div>
+        </td>
+      </tr>
+      {{/each}}
+      </table>
+    </div>
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/resource-tree-view">
+  <div style="width: 450px;">
+    <table cellpadding="0" cellspacing="0" class="tree-view" style="width: 100%; border: none;">
+      <tr>
+        <th style="width:27px;"><input type="checkbox" onchange="checkBoxToggle(this,false)"></th><th style="width:47px;"></th><th>NAME</th><th style="width:220px;">TYPE</th><th style="width:18px;"></th>
+      </tr>
+      {{#each element in elements}}
+      <tr><td colspan="5" style="padding-top: 0px; padding-bottom: 0px;">
+          {{resource-tree-element node=element}}
+      </td></tr>
+      {{/each}}
+      </table>
+    </div>
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/resource-tree-element">
+    <table class="tree-element"  cellpadding="0" cellspacing="0" style="width: 100%; border: none;" {{bind-attr id=node._id}}>
+    <tr class="tree-element-name" onclick="tree_view_onclick(get_tree_view_element_id(this));" onmouseover="$(this).addClass('mouse_on_row');" onmouseout="$(this).removeClass('mouse_on_row');" {{bind-attr nodeID=node.id}}>
+        <td style="width:20px;" class="node_list_check">
+          <input type="checkbox" onchange="tree_view_checkbox_onchange(this)">
+        </td>
+        <td style="width:18px;" class="node_list_sprite">{{{node.status_icon}}}</td>
+        <td class="resource_name" nowrap {{bind-attr node=node.style}}>
+          <span {{bind-attr class=node.span_class}}>{{node._id}}</span>
+        </td>
+        <td style="width:200px;" class="resource_type" {{bind-attr style=node.style}}>{{node.resource_type}}</td>
+        <td style="width:18px;">
+          <div style="display: none;" class="arrow sprites"></div>
+        </td>
+    </tr>
+      {{#if node.children.length}}
+      <tr class="children" style="display: none;">
+        <td colspan="5" style="padding-left: 10px; padding-top: 0px; padding-bottom: 0px;">
+        {{#each child in node.children}}
+          {{resource-tree-element node=child}}
+        {{/each}}
+        </td>
+      </tr>
+      {{/if}}
+      </table>
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/issues-table">
+  {{#if issue_list.length}}
+  <div class="issue_table">
+  <span {{bind-attr style=table_name_style}}>{{table_name}}</span>
+  <table style="width: 100%" {{bind-attr class=issue_class}}>
+    {{#each issue in issue_list}}
+      <tr><td>
+        {{issue.message}}
+      </td></tr>
+    {{/each}}
+  </table>
+  </div>
+  {{/if}}
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/resource-edit">
+  {{#if resource}}
+    <div id="node_info_header">
+      <div class="node_info_header_title" >Edit {{page_name}} </div>
+      <div id="node_info_header_title_name">{{resource._id}}</div>
+    </div>
+    <div id="node_sub_info">
+      <table>
+        <tr>
+          <td>
+            <input disabled style="margin-right: 50px;" type="text" {{bind-attr value=resource._id}} size="35" class="text_field">
+          </td>
+          <td>
+            {{{resource.status_icon}}}
+          </td>
+          <td nowrap>{{{resource.show_status}}}</td>
+        </tr>
+      </table>
+    </div>
+    <div id="node_options_buttons">
+    {{#if resource.stonith}}
+      <div class="xdark sprites" style="float: left"></div>
+      <div id="stonith_delete_link" class="link" onclick="verify_remove_fence_devices(curStonith());">Remove</div>
+      <div class="restart sprites" style="float: left"></div>
+      <div id="stonith_cleanup_link" class="link" onclick="cleanup_stonith();">Cleanup</div>
+    {{else}}
+      <div class="checkdark sprites" style="float: left"></div>
+      <div id="resource_start_link" class="link" onclick="enable_resource();">Enable</div>
+      <div class="cancel sprites" style="float: left"></div>
+      <div id="resource_stop_link" class="link" onclick="disable_resource();">Disable</div>
+      <div class="restart sprites" style="float: left"></div>
+      <div id="resource_cleanup_link" class="link" onclick="cleanup_resource();">Cleanup</div>
+      <div class="xdark sprites" style="float: left"></div>
+      <div id="resource_delete_link" class="link" onclick="verify_remove_resources(curResource());">Remove</div>
+    {{/if}}
+      <!--
+      <div class="move sprites" style="float: left"></div>
+      <div id="resource_move_link" class="link">Move</div>
+      <div class="history sprites" style="float: left"></div>
+      <div id="resource_history_link" class="link">History</div>
+      -->
+    </div>
+    <br>
+    <div id="resource_issues">
+      {{issues-table table_name="Errors" table_name_style="color:red;" issue_list=resource.error_list issue_class="error_table"}}
+      {{issues-table table_name="Warnings" table_name_style="color:orange;" issue_list=resource.warning_list issue_class="warning_table"}}
+    </div>
+    <div id="node_details">
+      <table style="margin-bottom:25px;">
+        <tr>
+          <td class="bold" nowrap>Type:</td>
+          <td class="reg" nowrap>{{resource.res_type}}</td>
+        </tr>
+        {{#unless resource.stonith}}
+        {{#if resource.is_primitive}}
+          <tr>
+            <td class="bold" nowrap>Current Location:</td>
+            <td id="cur_res_loc" class="reg">{{resource.nodes_running_on_string}}</td>
+          </tr>
+          {{#unless old_pcsd}}
+          {{#unless resource.parent}}
+            <tr>
+              <td class="bold" nowrap>Clone:</td>
+              <td id="cur_res_loc" class="reg" title="Makes the resource run multiple times on the cluster. By default the resource will run once on each of the nodes.">
+                <input type="button" onclick="resource_clone(curResource());" value="Create clone">
+              </td>
+            </tr>
+            <tr>
+              <td class="bold" nowrap>Master/Slave:</td>
+              <td id="cur_res_loc" class="reg" title="Makes the resource run multiple times on the cluster and distinguish between Master and Slave operating mode for each instance.By default the resource will run on one node in Master mode and on all other nodes in Slave mode.">
+                <input type="button" onclick="resource_master(curResource());" value="Create master/slave">
+              </td>
+            </tr>
+            <tr>
+              <td class="bold" nowrap>Group:</td>
+              <td id="cur_res_loc" class="reg">
+                {{{resource.group_selector}}}
+              </td>
+            </tr>
+          {{else}}
+            {{#if resource.parent.is_group}}
+            <tr>
+              <td class="bold" nowrap>Group:</td>
+              <td id="cur_res_loc" class="reg">
+                {{{resource.group_selector}}}
+              </td>
+            </tr>
+            {{/if}}
+          {{/unless}}
+          {{/unless}}
+        {{/if}}
+        {{/unless}}
+        {{#unless old_pcsd}}
+        {{#if resource.is_group}}
+        {{#unless resource.parent}}
+          <tr>
+            <td class="bold" nowrap>Clone:</td>
+            <td id="cur_res_loc" class="reg" title="Makes the resource run multiple times on the cluster. By default the resource will run once on each of the nodes.">
+              <input type="button" onclick="resource_clone(curResource());" value="Create clone">
+            </td>
+          </tr>
+          <tr>
+            <td class="bold" nowrap>Master/Slave:</td>
+            <td id="cur_res_loc" class="reg" title="Makes the resource run multiple times on the cluster and distinguish between Master and Slave operating mode for each instance.By default the resource will run on one node in Master mode and on all other nodes in Slave mode.">
+              <input type="button" onclick="resource_master(curResource());" value="Create master/slave">
+            </td>
+          </tr>
+          <tr>
+            <td class="bold" nowrap>Group:</td>
+            <td id="cur_res_loc" class="reg">
+              <input type="button" onclick="resource_ungroup(curResource());" value="Ungroup">
+            </td>
+          </tr>
+        {{/unless}}
+        {{/if}}
+        {{#if resource.is_multi_instance}}
+          <tr>
+            <td class="bold" nowrap>Clone:</td>
+            <td id="cur_res_loc" class="reg">
+              <input type="button" onclick="resource_unclone(curResource());" value="Unclone">
+            </td>
+          </tr>
+        {{/if}}
+        {{/unless}}
+      </table>
+      {{#unless resource.stonith}}
+        {{location_constraints-table constraints=resource.location_constraints}}
+        {{ordering_constraints-table constraints=resource.ordering_constraints resource_id=resource._id}}
+        {{ordering_set_constraints-table constraints=resource.ordering_set_constraints}}
+        {{colocation_constraints-table constraints=resource.colocation_constraints}}
+        {{meta_attributes-table resource=resource}}
+        {{#if utilization_support}}
+          {{#if resource.is_primitive}}
+            {{utilization-table entity=resource utilization=resource.utilization type="resource"}}
+          {{/if}}
+        {{/if}}
+        <br style="clear:left;">
+      {{/unless}}
+    </div>
+    {{#if stonith}}
+      <div style="clear:left; margin-top: 2em;" id="stonith_agent_form"></div>
+    {{else}}
+    {{#if resource.is_primitive}}
+      <div style="clear:left; margin-top: 2em;" id="resource_agent_form"></div>
+    {{/if}}
+    {{/if}}
+  {{else}}
+    {{#if stonith}}
+      NO FENCE DEVICE IN CLUSTER
+    {{else}}
+      NO RESOURCES IN CLUSTER
+    {{/if}}
+  {{/if}}
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/location_constraints-table">
+    <table style="clear:left;float:left">
+		<tr><td style="display: block;" onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="location_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Location Preferences ({{#if constraints}}{{constraints.length}}{{else}}0{{/if}})</td></tr>
+		<tr><td>
+		    <div id="locationdep">
+		      <table class="datatable">
+			<tr><th>Node/Rule</th><th>Score</th><th>Remove</th></tr>
+			{{#each cons in constraints}}
+			<tr>
+			{{#if cons.rule_string}}
+			  <td style="white-space: normal;">{{cons.rule_string}}</td>
+			  <td>{{#if cons.score-attribute}}{{cons.score-attribute}}{{else}}{{cons.score}}{{/if}}</td>
+			  <td {{bind-attr rule_id=cons.id}} style="text-align:center">
+			    {{#unless cons.temp}}
+				<a onclick="remove_constraint_rule($(this).parent().attr('rule_id'));return false;" href="#" class="remove">X</a>
+			    {{/unless}}
+			  </td>
+			{{else}}
+			  <td>{{cons.node}}</td>
+			  <td>{{cons.score}}</td>
+			  <td {{bind-attr constraint_id=cons.id}} style="text-align:center">
+			    {{#unless cons.temp}}
+				<a onclick="remove_constraint($(this).parent().attr('constraint_id'));return false;" href="#" class="remove">X</a>
+			    {{/unless}}
+			  </td>
+			{{/if}}
+			</tr>
+			{{else}}
+			<tr><td style="color: gray;">NONE</td><td></td><td></td></tr>
+		        {{/each}}
+			<tr id="new_res_loc">
+			  <td><input type="text" name="node_id"></td>
+			  <td><input type="text" name="score" size="5"></td>
+			  <td><button type="button" onclick="add_constraint('#new_res_loc','loc', false);" name="add">Add</button></td>
+			</tr>
+		      </table>
+		    </div>
 		  </td>
 		</tr>
-		<% if !@manage %>
-		<tr>
-		  <td class="menuitem first">{{#link-to "Nodes" }}NODES{{/link-to}}</td>
-		  <td class="menuitem">{{#link-to "Resources" }}RESOURCES{{/link-to}}</td>
-		  <td class="menuitem">{{#link-to "Fence Devices" }}FENCE DEVICES{{/link-to}}</td>
-		  <td class="menuitem">{{#link-to "ACLs" }}ACLS{{/link-to}}</td>
-		  <td class="menuitem">{{#link-to "Configuration" }}CLUSTER PROPERTIES{{/link-to}}</td>
-		  <td class="menuitem"><a href="/manage">MANAGE CLUSTERS</a></td>
-		  <!--<td class="menuitem">{{#link-to "Wizards" }}WIZARDS{{/link-to}}</td>-->
+    </table>
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/ordering_constraints-table">
+    <table style="clear:left;float:left;">
+		<tr><td style="display: block;" onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="ordering_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Ordering Preferences ({{#if constraints.length}}{{constraints.length}}{{else}}0{{/if}})</td></tr>
+		<tr><td>
+		    <div id="locationdep">
+		      <table class="datatable">
+			<tr><th>Resource</th><th>Action</th><th>Before/After</th><th>Action</th><th>Score</th><th>Remove</th></tr>
+		        {{#each cons in constraints}}
+			  <tr><td>{{cons.other_rsc}}</td>
+			    {{#if cons.before}}
+			      <td>{{#if cons.first-action}}{{cons.first-action}}s{{else}}starts{{/if}}</td>
+			      <td>before {{cons.then}}</td>
+			      <td>{{#if cons.then-action}}{{cons.then-action}}s{{else}}starts{{/if}}</td>
+			    {{else}}
+			      <td>{{#if cons.then-action}}{{cons.then-action}}s{{else}}starts{{/if}}</td>
+			      <td>after {{cons.first}}</td>
+			      <td>{{#if cons.first-action}}{{cons.first-action}}s{{else}}starts{{/if}}</td>
+			    {{/if}}
+			    <td>{{cons.score}}</td>
+			    <td {{bind-attr constraint_id=cons.id}} style="text-align:center">
+				  <a onclick="remove_constraint($(this).parent().attr('constraint_id'));return false;" href="#" class="remove">X</a>
+			    </td>
+			  </tr>
+			{{else}}
+			<tr><td style="color: gray;">NONE</td><td></td><td></td><td></td><td></td><td></td></tr>
+ 			{{/each}}
+			<tr id="new_res_orc">
+			  <td><input type="text" name="target_res_id"></td>
+			  <td>
+			    <select name="target_action">
+			      <option value="start">starts</option>
+			      <option value="promote">promotes</option>
+			      <option value="demote">demotes</option>
+			      <option value="stop">stops</option>
+			    </select>
+			  </td>
+			  <td>
+			    <select name="order">
+			      <option value="after">after</option>
+			      <option value="before">before</option>
+			    </select>
+			    {{resource_id}}
+			  </td>
+			  <td>
+			    <select name="res_action">
+			      <option value="start">starts</option>
+			      <option value="promote">promotes</option>
+			      <option value="demote">demotes</option>
+			      <option value="stop">stops</option>
+			    </select>
+			  </td>
+			  <td><input type="text" name="score" size="5"></td>
+			  <td><button type="button" onclick="add_constraint('#new_res_orc','ord', false);" name="add">Add</button></td>
+			</tr>
+		      </table>
+		    </div>
+		  </td>
 		</tr>
-	      <% else %>
-		<tr height="37px">
-		  <td colspan=5 width="565px"></td>
+	      </table>
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/ordering_set_constraints-table">
+    <table style="clear:left;float:left;">
+          <tr><td style="display: block;" onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="ordering_set_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Ordering Set Preferences ({{#if constraints.length}}{{constraints.length}}{{else}}0{{/if}})</td></tr>
+          <tr><td>
+            <div id="locationdep">
+              <table class="datatable">
+                <tr>
+                  <th>Preference Name/Set of Resources</th>
+                  <th style="text-align: center;">Remove</th>
+                </tr>
+                {{#each cons in constraints}}
+                <tr>
+                  <td>{{cons.id}}</td>
+                  <td {{bind-attr constraint_id="cons.id"}} style="text-align:center;">
+                    <a onclick="remove_constraint($(this).parent().attr('constraint_id')); return false;" href="#" class="remove">X</a>
+                  </td>
+                </tr>
+                {{#each set in cons.sets}}
+                <tr>
+                  <td style="padding-left:2em;">Set:{{#each rsc in set.resources}} {{rsc}}{{/each}}</td>
+                  <td></td>
+                </tr>
+                {{/each}}
+                {{else}}
+                <tr><td style="color: gray;">NONE</td><td></td></tr>
+                {{/each}}
+                <tr id="new_res_orc_set" title="Enter the resources you want to be in one set into the 'Set' field separated by space.
+Use the 'New Set' button to create more sets.
+Use the 'Add' button to submit the form.">
+                  <td>Set: <input type="text" name="resource_ids[]"></td>
+                  <td style="vertical-align: bottom;">
+                    <button type="button" onclick="new_constraint_set_row('#new_res_orc_set');" name="new-row">New Set</button>
+                    <button type="button" onclick="add_constraint_set('#new_res_orc_set', 'ord', false);" name="add">Add</button>
+                  </td>
+                </tr>
+              </table>
+            </div>
+          </td></tr>
+        </table>
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/colocation_constraints-table">
+    <table style="clear:left;float:left">
+		<tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="colocation_constraints"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Colocation Preferences ({{#if constraints.length}}{{constraints.length}}{{else}}0{{/if}})</td></tr>
+		<tr><td>
+		    <div id="locationdep">
+		      <table class="datatable">
+			<tr><th>Resource</th><th>Together/Apart</th><th>Score</th><th>Remove</th></tr>
+			{{#each cons in constraints}}
+			  <tr><td>{{cons.other_rsc}}</td>
+			    <td>{{cons.together}}</td>
+			    <td>{{cons.score}}</td>
+			    <td {{bind-attr constraint_id=cons.id}} style="text-align:center">
+				  <a onclick="remove_constraint($(this).parent().attr('constraint_id'));return false;" href="#" class="remove">X</a>
+			    </td>
+			  </tr>
+			  {{else}}
+			  <tr><td style="color: gray;">NONE</td><td></td><td></td><td></td></tr>
+              {{/each}}
+			<tr id="new_res_col">
+			  <td><input type="text" name="target_res_id"></td>
+			  <td><select name="colocate"><option value="together">Together<option value="apart">Apart</select></td>
+			  <td><input type="text" name="score" size="5"></td>
+			  <td><button type="button" onclick="add_constraint('#new_res_col','col', false);" name="add">Add</button></td>
+			</tr>
+
+		      </table>
+		    </div>
+		  </td>
 		</tr>
-	      <% end %>
 	      </table>
-	    </div>
-	</td>
-	<td>
-	</td>
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/meta_attributes-table">
+    <table style="clear:left;float:left">
+		<tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer" id="meta_attributes"><span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span>Resource Meta Attributes ({{#if resource.meta_attr.length}}{{resource.meta_attr.length}}{{else}}0{{/if}})</td></tr>
+		<tr><td>
+		    <div id="locationdep">
+		      <table class="datatable">
+			<tr><th>Meta Attribute</th><th>Value</th><th>Remove</th></tr>
+			{{#each meta in resource.meta_attr}}
+			<tr><td>{{meta.name}}</td>
+			    <td>{{meta.value}}</td>
+			    <td {{bind-attr meta_attr_key=meta.name}} {{bind-attr meta_attr_res=resource.id}} style="text-align:center">
+				  <a onclick="remove_meta_attr($(this).parent());return false;" href="#" class="remove">X</a>
+			    </td>
+			  </tr>
+			  {{else}}
+			  <tr><td style="color: gray;">NONE</td><td></td><td></td></tr>
+			  {{/each}}
+			<tr id="new_meta_col">
+			  <td><input type="text" name="new_meta_key" size="20"></td>
+			  <td><input type="text" name="new_meta_value" size="20"></td>
+			  <td><button type="button" onclick="add_meta_attr('#new_meta_col');" name="add">Add</button></td>
+			</tr>
+		      </table>
+		    </div>
+		  </td>
+		</tr>
+    </table>
+  </script>
+
+  <script type="text/x-handlebars" data-template-name="components/utilization-table">
+    <table style="clear:left; float:left;">
+      <tr>
+        <td {{action toggleBody}} class="datatable_header hover-pointer">
+          {{#if show_content}}
+            <span class="downarrow sprites"></span>
+          {{else}}
+            <span class="rightarrow sprites"></span>
+          {{/if}}
+          <span style="text-transform: capitalize;">{{type}} Utilization Attributes ({{util_count}})</span>
+        </td>
       </tr>
-      </table>
-    </div>
-  </div>
+      {{#if show_content}}
+      <tr>
+        <td>
+          <div class="utilization_table">
+            <table class="datatable">
+              <tr>
+                <th>Utilization Attribute</th><th>Value</th><th>Remove</th>
+              </tr>
+              {{#each util in utilization}}
+              <tr>
+                <td>{{util.name}}</td>
+                <td>{{util.value}}</td>
+                <td style="text-align: center;">
+                  <a {{action remove util.name}} class="remove" href="#">X</a>
+                </td>
+              </tr>
+              {{else}}
+              <tr>
+                <td style="color: gray;">NONE</td><td></td><td></td>
+              </tr>
+              {{/each}}
+              <tr {{bind-attr id=form_id}}>
+                <td>
+                  <input type="text" name="new_utilization_name" size="20">
+                </td>
+                <td>
+                  <input type="text" name="new_utilization_value" size="20">
+                </td>
+                <td>
+                  <button {{action add form_id}} type="button" name="add">Add</button>
+                </td>
+              </tr>
+            </table>
+          </div>
+        </td>
+      </tr>
+      {{/if}}
+    </table>
+  </script>
+  
+  <script type="text/x-handlebars">
+<div id="wrapper">
+
+<div id="header"><div id="topmenu">
+  <table id="menu_controls">
+    <tr>
+      <td width="170" style="vertical-align: top;">
+        <div id="logo">
+          <img width="170" height="40" src="/images/HAM-logo.png" onclick="if (Pcs) { Pcs.update(); }">
+        </div>
+      </td>
+      <td>
+        <div id="menu_list">
+          <table width="700">
+            <tr>
+              <td style="padding-bottom:8px;">
+                <% if @clusters != nil %>
+                  <div style="float:left;">
+                    <div class="menuheader">
+                      <div id="dropdownr" class="label"  style="white-space:nowrap" onclick="window.location='/manage'">
+                        <table style="background: #000000">
+                          <tr>
+                            <td>
+                              <%=@manage ? "Select Cluster" : "Cluster: " + @cluster_name%>
+                            </td>
+                            <td>
+                              <div style="float:right;" class="downarrow sprites"></div>
+                            </td>
+                          </tr>
+                        </table>
+                      </div>
+                      <div style="width:100%;" class="menu">
+                        <% if not @manage %>
+                          <a href="/manage" class="menu-item">Manage Clusters  </a>
+                        <% end %>
+                        <% @clusters.each { |c| %>
+                          <a href="<%=c.ui_address%>" class="menu-item"><%=c.name%>  </a>
+                        <% } %>
+                      </div>
+                    </div>
+                  </div>
+                <% end %>
+              </td>
+              <td style="padding-bottom:8px;">
+                <% if session[:username] %>
+                  <div style="float:right">
+                    <div class="menuheader">
+                      <div id="dropdownr" class="label"><%=session[:username]%><div style="float:right;" class="downarrow sprites"></div></div>
+                      <div style="width:100%" class="menu">
+                        <a href="/logout" class="menu-item">Logout  </a>
+                      </div>
+                    </div>
+                    <!-- <div id="dropdownr">
+                      <%=session[:username]%><div style="float:right;" class="downarrow sprites"></div>
+                    </div> -->
+                  </div>
+                <% end %>
+              </td>
+            </tr>
+          </table>
+          <table>
+            <% if !@manage %>
+              <tr>
+                <td class="menuitem first">{{#link-to "Nodes" }}NODES{{/link-to}}</td>
+                <td class="menuitem">{{#link-to "Resources" }}RESOURCES{{/link-to}}</td>
+                <td class="menuitem">{{#link-to "Fence Devices" }}FENCE DEVICES{{/link-to}}</td>
+                <td class="menuitem">{{#link-to "ACLs" }}ACLS{{/link-to}}</td>
+                <td class="menuitem">{{#link-to "Configuration" }}CLUSTER PROPERTIES{{/link-to}}</td>
+                <td class="menuitem"><a href="/manage">MANAGE CLUSTERS</a></td>
+                <!--<td class="menuitem">{{#link-to "Wizards" }}WIZARDS{{/link-to}}</td>-->
+              </tr>
+            <% else %>
+              <tr>
+                <td class="menuitem first"><a href="/manage">MANAGE CLUSTERS</a></td>
+                <td class="menuitem"><a href="/permissions">PERMISSIONS</a></td>
+              </tr>
+            <% end %>
+          </table>
+        </div>
+      </td>
+      <td> </td>
+    </tr>
+  </table>
+</div></div>
+
+<div id="content">
+  <%= yield %>
+</div>
+
+<div id="push">
+</div>
 
-  <div id="content">
-      <%= yield %>
-  </div>
-  <div id="push">
-  </div>
 </div>
     </script>
 <!--  <div id="footer">
@@ -166,7 +673,7 @@ $(function() {
   <script type="text/javascript">
     $(function() {
       $("#manage_error").dialog({
-	title: "Bad Cluster/Node Name",
+	title: "Warning",
 	buttons: {
 	  Ok: function() {
 	    $(this).dialog("close");
diff --git a/pcsd/views/manage.erb b/pcsd/views/manage.erb
index c76849e..1b53ec9 100644
--- a/pcsd/views/manage.erb
+++ b/pcsd/views/manage.erb
@@ -8,7 +8,7 @@
 	  <td>
 	      <a href="#" class="x sprites"></a>
 	    <div class="link">
-	      <a id="manage_remove_cluster" href="#" onclick="verify_remove('cluster','You must select at least one cluster to remove', 'Remove Cluster(s)','Cluster Removal');">Remove</a>
+	      <a id="manage_remove_cluster" href="#" onclick="verify_remove_clusters();">Remove</a>
 	    </div>
 	  </td>
 	  <td>
@@ -34,7 +34,7 @@
     </td>
   </tr>
   <tr id="cluster_list_row" {{bind-attr style="Pcs.manage_page"}}>
-    <td id="cluster_list" class="node_list" style="width:400px;">
+    <td id="cluster_list" class="node_list" style="width:350px;">
       <%= erb :_cluster_list %>
     </td>
     <td id="node_info" colspan=2>
@@ -42,45 +42,154 @@
 	<div id="node_info_header_title">INFORMATION ABOUT CLUSTERS</div>
       </div>
       <div id="node_sub_info">
-	<div id="no_cluster_selected">Select a cluster to view more detailed cluster information</div>
-	<% @clusters.each do |c| %>
-	  <div id="cluster_info_<%=c.name%>" style="display:none;">
-	    <table>
-	      <tr><td style="text-align:right"><b>Cluster:</b> </td><td><%=c.name%></td></tr>
-	      <% first = true %>
-	      <% c.nodes.each { |n| %>
-		<%= '<tr><td style="text-align:right"><b>Nodes:</b> </td>' if first %>
-		<%= "<tr><td></td>" if not first %>
-		  <td><%=n%></td></tr>
-		<% first = false %>
-	      <% } %>
-	    </table>
-	  </div>
-	<% end %>
+        {{#if Pcs.clusterController.cur_cluster}}
+        <div {{bind-attr id=Pcs.clusterController.cur_cluster.div_id}}>
+          <table>
+            <tr>
+              <td style="text-align:right">
+                <b>Cluster:</b> 
+              </td>
+              <td>
+                {{#if Pcs.clusterController.cur_cluster.forbidden}}
+                  {{Pcs.clusterController.cur_cluster.name}}
+                {{else}}
+                  <a {{bind-attr href=Pcs.clusterController.cur_cluster.url_link}}>{{Pcs.clusterController.cur_cluster.name}}</a> {{{Pcs.clusterController.cur_cluster.quorum_show}}}
+                {{/if}}
+              </td>
+            </tr>
+            {{#if Pcs.clusterController.cur_cluster.error_list}}
+              <tr><td style="text-align:right"><b>Errors:</b> </td><td></td></tr>
+            {{/if}}
+            {{#each Pcs.clusterController.cur_cluster.error_list}}
+              <tr><td></td><td style="color: red;">{{{message}}}</td></tr>
+            {{/each}}
+            {{#if Pcs.clusterController.cur_cluster.warning_list}}
+              <tr><td style="text-align:right"><b>Warnings:</b> </td><td></td></tr>
+            {{/if}}
+            {{#each Pcs.clusterController.cur_cluster.warning_list}}
+              <tr><td></td><td style="color: orange;">{{{message}}}</td></tr>
+            {{/each}}
+          </table><br>
+          {{#unless Pcs.clusterController.cur_cluster.forbidden}}
+          <table style="clear:left;float:left" class="nodes_list">
+            <tr>
+              <td class="datatable_header hover-pointer" onclick="show_hide_dashboard(this, 'nodes');">
+                <span style="display: none;" class="downarrow sprites"></span>
+                <span style="" class="rightarrow sprites"></span>
+                Nodes ({{Pcs.clusterController.cur_cluster.nodes.length}} | {{#if Pcs.clusterController.cur_cluster.nodes_failed}}<span style="color: red">issues: {{Pcs.clusterController.cur_cluster.nodes_failed}}{{else}}<span style="color: green;">OK{{/if}}</span>)
+              <span style="font-size: 10px;">(displaying {{#if Pcs.clusterController.show_all_nodes}}all{{else}}only issues{{/if}})</span>
+              </td>
+            </tr>
+            <tr>
+              <td>
+                <table class="datatable">
+                  <tr>
+                    <th style="width: 150px;">NODE</th>
+                    <th style="width: 100px;">STATUS</th>
+                    <th style="width: 70px;">QUORUM</th>
+                  </tr>
+                  {{#each node in Pcs.clusterController.cur_cluster.nodes}}
+                  <tr {{bind-attr title=node.tooltip}} {{bind-attr class=node.status_class}}>
+                    <td><a {{bind-attr href=node.url_link}}>{{node.name}}</a></td>
+                    <td {{bind-attr style=node.status_style}}>{{{node.status_icon}}}{{node.status}}</td>
+                    <td>{{{node.quorum_show}}}</td>
+                  </tr>
+                  {{/each}}
+                </table>
+              </td>
+            </tr>
+          </table>
+          {{#unless Pcs.clusterController.cur_cluster.status_unknown}}
+          <table style="clear:left;float:left" class="resources_list">
+            <tr>
+              <td class="datatable_header hover-pointer" onclick="show_hide_dashboard(this, 'resources');">
+                <span style="display: none;" class="downarrow sprites"></span>
+                <span style="" class="rightarrow sprites"></span>
+                Resources ({{Pcs.clusterController.cur_cluster.resource_list.length}} | {{#if Pcs.clusterController.cur_cluster.resources_failed}}<span style="color: red">issues: {{Pcs.clusterController.cur_cluster.resources_failed}}{{else}}<span style="color: green;">OK{{/if}}</span>)
+              <span style="font-size: 10px;">(displaying {{#if Pcs.clusterController.show_all_resources}}all{{else}}only issues{{/if}})</span>
+              </td>
+            </tr>
+            <tr>
+              <td>
+                <table class="datatable">
+                  <tr>
+                    <th style="width: 150px;">RESOURCE</th>
+                    <th style="width: 100px;">STATUS</th>
+                  </tr>
+                  {{#each r in Pcs.clusterController.cur_cluster.resource_list}}
+                  <tr {{bind-attr title=r.tooltip}} {{bind-attr class=r.status_class}}>
+                    <td><a {{bind-attr href=r.url_link}}>{{r.id}}</a></td>
+                    <td {{bind-attr style=r.status_style}}>{{{r.status_icon}}}{{r.status}}</td>
+                  </tr>
+                  {{else}}
+                  <tr>
+                    <td>No resources</td>
+                    <td></td>
+                  </tr>
+                  {{/each}}
+                </table>
+              </td>
+            </tr>
+          </table>
+          <table style="clear:left;float:left" class="fence_list">
+            <tr>
+              <td class="datatable_header hover-pointer" onclick="show_hide_dashboard(this, 'fence');">
+                <span style="display: none;" class="downarrow sprites"></span>
+                <span style="" class="rightarrow sprites"></span>
+                Fence-devices ({{Pcs.clusterController.cur_cluster.fence_list.length}} | {{#if Pcs.clusterController.cur_cluster.fence_failed}}<span style="color: red">issues: {{Pcs.clusterController.cur_cluster.fence_failed}}{{else}}<span style="color: green;">OK{{/if}}</span>)
+              <span style="font-size: 10px;">(displaying {{#if Pcs.clusterController.show_all_fence}}all{{else}}only issues{{/if}})</span>
+              </td>
+            </tr>
+            <tr>
+              <td>
+                <table class="datatable">
+                  <tr>
+                    <th style="width: 150px;">FENCE-DEVICE</th>
+                    <th style="width: 100px;">STATUS</th>
+                  </tr>
+                  {{#each f in Pcs.clusterController.cur_cluster.fence_list}}
+                  <tr {{bind-attr title=f.tooltip}} {{bind-attr class=f.status_class_fence}}>
+                    <td><a {{bind-attr href=f.url_link}}>{{f.id}}</a></td>
+                    <td {{bind-attr style=f.status_style}}>{{{f.status_icon}}}{{f.status}}</td>
+                  </tr>
+                  {{else}}
+                  <tr>
+                    <td>No fence devices</td>
+                    <td></td>
+                  </tr>
+                  {{/each}}
+                </table>
+              </td>
+            </tr>
+          </table>
+          {{/unless}}
+          {{/unless}}
+        </div>
+        {{else}}
+        <div id="no_cluster_selected">Select a cluster to view more detailed cluster information</div>
+        {{/if}}
       </div>
     </td>
   </tr>
 </table>
-<div id="verify_remove" style="display: none;">
-  <p style="font-size:12px;">Are you sure you want to remove the following cluster(s) from the GUI? (This only removes the cluster from the GUI, it does not stop the cluster from running)</p>
-  <span id="resource_to_remove"></span>
-</div>
+<%= erb :_dialogs %>
 <div id="add_existing_cluster" style="display: none;">
   <form id="add_existing_cluster_form" action="/manage/existingcluster" method="post">
     <br>
     Enter the hostname/IP of a node in a cluster that you would like to manage:
     <br>
     <br>
-    <table>
-      <tr><td align=center style="color: red" colspan=2"><span id="unable_to_connect_error_msg_ae" style="display:none;">Unable to contact node.<br>Please make sure pcsd is started and authenticated.<br/></span>
+    <table class="err_msg_table" style="width: 100%">
+      <tr><td align=center style="color: red" colspan=2"><span id="unable_to_connect_error_msg_ae" style="display:none;">Unable to contact node.<br>Please make sure pcsd is started.<br/></span>
 	  <span id="add_existing_cluster_error_msg" style="display:none;">
 	  </span>
       </td></tr>
+    </table>
+    <table>
       <tr><td align=right>Node Name/IP:</td><td><input size="50" name="node-name" type="text"></td></tr>
     </table>
   </form>
 </div>
-</div>
 <div id="create_new_cluster" style="display: none;">
   <form id="create_new_cluster_form" action="/manage/newcluster" method="post">
     <br>
@@ -93,7 +202,9 @@
       <tr><td align=right>Node 2:</td><td><input size="50" name="node-2" type="text"></input></td></tr>
       <tr><td align=right>Node 3:</td><td><input size="50" name="node-3" type="text"></input></td></tr>
       <tr><td></td><td id="manage_more_nodes" onclick="create_cluster_add_nodes();" style="color: #2B85DB;">More nodes...</td></tr>
-      <tr><td align=center style="color: red" colspan=2"><span id="unable_to_connect_error_msg" style="display:none;">Unable to contact nodes highlighted in red.<br><span style="color:orange;">Unable to authenticate to nodes highlighted in orange.</span><br>Please make sure pcsd is started on all nodes.</span></td></tr>
+    </table>
+    <table class="err_msg_table" style="width:100%">
+      <tr><td align=center style="color: red" colspan=2"><span id="unable_to_connect_error_msg" style="display:none;">Unable to contact nodes highlighted in red.<br>Please make sure pcsd is started on all nodes.</span></td></tr>
       <tr><td align=center style="color: red" colspan=2"><span id="at_least_one_node_error_msg" style="display:none;">At least one valid node must be entered.</span></td></tr>
       <tr><td align=center style="color: red" colspan=2"><span id="bad_cluster_name_error_msg" style="display:none;">You may not leave the cluster name field blank</span></td></tr>
       <tr><td align=center style="color: red" colspan=2"><span id="addr0_addr1_mismatch_error_msg" style="display:none;">Ring 1 addresses do not match to Ring 0 addresses</span></td></tr>
@@ -330,7 +441,25 @@ Specify ring 1 address for each node if you want to use RRP." %>
   <% if @error == "duplicatenodename" %>
     The node, '<%=@errorval%>' is already configured in pcsd.  You may not add a node to two different clusters in pcsd.
   <% end %>
+  <% if @error == "cannotgettokens" %>
+    Unable to get authentication info from cluster '<%=@errorval%>'.
+  <% end %>
+  <% if @error == "cannotsavetokens" %>
+    Unable to authenticate all nodes on node '<%=@errorval%>'.
+  <% end %>
+  <% if @error == "authimposible" %>
+    Operation successful.<br>Unable to correctly authenticate the cluster because it is running an old version of pcs/pcsd.
+  <% end %>
   <% if @error == "unabletocreate" %>
-    Unable to create new cluster because cluster already exists on one or more of the nodes.  Run 'pcs cluster destroy' on all nodes to remove current cluster configuration.<br><br>(<%=@errorval%>)
+    Unable to create new cluster. If a cluster already exists on one or more of the nodes, run 'pcs cluster destroy' on all nodes to remove the current cluster configuration.<br><br><%=nl2br(@errorval)%>
+  <% end %>
+  <% if @error == "configversionsconflict" %>
+    Configuration conflict detected.<br><br>Some nodes had a newer configuration than the local node.  Local node's configuration was updated.  Please repeat the last action if appropriate.
+  <% end %>
+  <% if @error == "permissiondenied" %>
+    Permission denied.
+  <% end %>
+  <% if @error == "genericerror" %>
+    <%=nl2br(@errorval)%>
   <% end %>
 </div>
diff --git a/pcsd/views/nodes.erb b/pcsd/views/nodes.erb
index eba0b99..a554bc8 100644
--- a/pcsd/views/nodes.erb
+++ b/pcsd/views/nodes.erb
@@ -9,8 +9,8 @@
 	  <input type="checkbox" onchange="checkBoxToggle($(this), true);">
 	</div>
 	<div id="no_coro_support">
-	  <div class="x sprites"></div><div class="link" onclick="verify_node_remove();return false;"> Remove    </div>
-	  <div class="plus sprites"></div><div class="link" onclick="$('#add_node').dialog({title: 'Add Node', modal:true, width: 'auto'});return false;"> Add</div>
+	  <div class="x sprites"></div><div class="link" onclick="verify_remove_nodes(); return false;"> Remove    </div>
+	  <div class="plus sprites"></div><div class="link" onclick="add_node_dialog(); return false;"> Add</div>
 	</div>
       </td>
       <td class="borderbottom"> </td>
@@ -40,242 +40,275 @@
 	</table>
       </td>
       <td id="node_info" colspan=2>
-	<div id="node_info_div" style="opacity: 0;">
-	  <div id="test">
-	    <div id="node_info_header">
-	      <div id="node_info_header_title" class="node_info_header_title">Edit Node </div>
-	      <div id="node_info_header_title_name">
-{{Pcs.nodesController.cur_node.name}}
-	    </div>
-
-	    </div>
-
-	    <div id="node_sub_info">
-	      <table>
-		<tr>
-		  <td rowspan=2>
-				<input disabled style="margin-right: 50px;" type="text" {{bind-attr value="Pcs.nodesController.cur_node.name"}} size="35" class="text_field">
-		</td>
-		  <td><div style="margin-right: 8px;" class="check sprites"></div></td>
-		  <td>
-		    <!--		      <tr><td>pacemaker</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div><div id="pacemaker_status" style="float:left" class="status"></div></td></tr> -->
-		    {{#if Pcs.nodesController.cur_node.pacemaker}}
-		    <div id="pacemaker_online_status" class="status">
-			Pacemaker Connected
-		    {{else}}
-		      {{#if Pcs.nodesController.cur_node.pacemaker_standby}}
-		    <div id="pacemaker_online_status" class="status-standby">
-			Pacemaker Standby
-		      {{else}}
-		    <div id="pacemaker_online_status" class="status-offline">
-			Pacemaker Not Connected
-		      {{/if}}
-		    {{/if}}
-		    </div>
-		  </td>
-		</tr>
-		<tr>
-		  <td><div style="margin-right: 8px;" class="check sprites"></div></td>
-		  <td>
-		    {{#if Pcs.nodesController.cur_node.corosync}}
-		    <div id="corosync_online_status" class="status">
-			Corosync Connected
-		    {{else}}
-		    <div id="corosync_online_status" class="status-offline">
-			Corosync Not Connected
-		    {{/if}}
-		    </div>
-		  </td>
-		</tr>
-	      </table>
-	    </div>
-
-	    <div id="node_options_buttons">
-	      <div id="node_start" class="link">
-		<div class="restart sprites" style="float: left"></div>
-		 Start
-	      </div>
-	      <div id="node_stop" class="link">
-		<div class="cancel sprites" style="float: left"></div>
-		 Stop
-	      </div>
-	      <div id="node_restart" class="link">
-		<div class="restart sprites" style="float: left"></div>
-		 Restart
-	      </div>
-	      <div id="node_unstandby" class="link" {{bind-attr style="Pcs.nodesController.cur_node.unstandby_style"}}>
-		<div class="unstandby sprites" style="float: left"></div>
-		 Unstandby
-	      </div>
-	      <div id="node_standby" class="link" {{bind-attr style="Pcs.nodesController.cur_node.standby_style"}}>
-		<div class="standby sprites" style="float: left"></div>
-		 Standby
-	      </div>
-	      <div class="configure sprites" style="float: left"></div>
-	      <div class="link"><a href="#/fencedevices" onclick="select_menu('FENCE DEVICES');return true;">Configure Fencing</a></div>
-	    </div>
-
-	    <div id="node_details">
-	      <table><tr>
-		  <td><div class="reg">Node ID:</div></td>
-		  <td><div class="bold">{{Pcs.nodesController.cur_node.node_id}}</div></td>
-		  <td><div class="reg"> Uptime:</div></td>
-		  <td><div class="bold" id="uptime">{{Pcs.nodesController.cur_node.uptime}}</div></td>
-		</tr>
-	      </table>
-	    </div>
+        <div id="node_info_div" style="opacity: 0;">
+          <div id="test">
+            <div id="node_info_header">
+              <div id="node_info_header_title" class="node_info_header_title">Edit Node </div>
+              <div id="node_info_header_title_name">
+                {{Pcs.nodesController.cur_node.name}}
+              </div>
+            </div>
 
-	    <table style="float:left;margin-top:25px">
-	      <tr><td class="datatable_header">Cluster Daemons</td></tr>
-	      <tr><td>
-		  <div id="clusterdaemons">
-		    <table class="datatable">
-		      <tr><th>NAME</th><th>STATUS</th></tr>
-		      <tr><td>pacemaker</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div>
-{{#if Pcs.nodesController.cur_node.pacemaker_daemon}}
-<span id="pacemaker_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span>
-{{else}}
-{{#if Pcs.nodesController.cur_node.pcsd}}
-<span id="pacemaker_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span>
-{{else}}
-<span id="pacemaker_status" style="float:left" class="status-unknown">Unknown ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span>
-{{/if}}
-{{/if}}
-</td></tr>
-		      <tr><td>corosync</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div>
-{{#if Pcs.nodesController.cur_node.corosync_daemon}}
-<span id="corosync_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.corosync_startup}})</span>
-{{else}}
-{{#if Pcs.nodesController.cur_node.pcsd}}
-<span id="corosync_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.corosync_startup}})</span>
-{{else}}
-<span id="corosync_status" style="float:left" class="status-unknown">Unknown ({{Pcs.nodesController.cur_node.corosync_startup}})</span>
-{{/if}}
-{{/if}}
-</td></tr>
-		      <tr><td>pcsd</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div>
-{{#if Pcs.nodesController.cur_node.pcsd}}
-<span id="pcsd_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.pcsd_startup}})</span>
-{{else}}
-  {{#if Pcs.nodesController.cur_node.authorized}}
-  <span id="pcsd_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.pcsd_startup}})</span>
-  {{else}}
-  <span id="pcsd_status" style="float:left" class="status-offline">Running (not Authorized) ({{Pcs.nodesController.cur_node.pcsd_startup}})</span>
-  {{/if}}
-{{/if}}
-</td></tr>
-		    </table>
-		  </div>
-		</td>
-	      </tr>
-	    </table>
-	    <table style="clear:left;float:left;margin-top:25px;">
-	      <tr><td class="datatable_header">Running Resources</td></tr>
-	      <tr><td>
-		  <div id="resources_running">
-		    <table class="datatable">
-		      <tr><th>NAME</th></tr>
-		      {{#if Pcs.nodesController.cur_node.running_resources}}
-		        {{#each res in Pcs.nodesController.cur_node.running_resources}}
-			<tr><td>
-			    {{#unless res.stonith}}
-			      {{#link-to 'Resources.index' res}}{{res.name}} ({{res.res_type}}){{/link-to}}
-			    {{/unless}}
-			</td></tr>
-		        {{/each}}
-		      {{else}}
-			<tr><td style="color: gray;">NONE</td></tr>
-		      {{/if}}
-		    </table>
-		  </div>
-		</td>
-	      </tr>
-	    </table>
-	    <table style="clear:left;float:left;margin-top:25px;">
-	      <tr><td class="datatable_header">Resource Location Preferences</td></tr>
-	      <tr><td>
-		  <div id="locationdep">
-		    <table class="datatable">
-		      <tr><th>NAME</th><th>Score</th></tr>
-		    {{#if Pcs.nodesController.cur_node.location_constraints}}
-		    {{#each Pcs.nodesController.cur_node.location_constraints}}
-		      <tr><td>{{rsc}}</td><td>{{score}}</td><td></td></tr>
-		    {{/each}}
-		    {{else}}
-			<tr><td style="color: gray;">NONE</td><td></td></tr>
-		    {{/if}}
-		    </table>
-		  </div>
-		</td>
-	      </tr>
-	    </table>
-        <table style="clear:left;float:left;margin-top:25px;">
-          <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
-          <tr><td>
-            <div id="locationdep">
-              <table class="datatable">
-                <tr><th>Attribute</th><th>Value</th><th>Remove</th></tr>
-                {{#each Pcs.nodesController.cur_node_attr}}
-                <tr><td>{{this.key}}</td>
-                  <td>{{this.value}}</td>
-                  <td {{bind-attr node_attr_key="this.key"}} {{bind-attr node_attr_parent="this.parent"}} style="text-align:center">
-                  <a onclick="remove_node_attr($(this).parent());return false;" href="#" class="remove">X</a>
+            <div id="node_sub_info">
+              <table>
+                <tr>
+                  <td rowspan=2>
+                    <input disabled style="margin-right: 50px;" type="text" {{bind-attr value="Pcs.nodesController.cur_node.name"}} size="35" class="text_field">
+                  </td>
+                  <td><div style="margin-right: 8px;" class="check sprites"></div></td>
+                  <td>
+                    {{#if Pcs.nodesController.cur_node.pacemaker}}
+                    {{#unless Pcs.nodesController.cur_node.pacemaker_standby}}
+                    {{#if Pcs.nodesController.cur_node.pacemaker_maintenance}}
+                    <div id="pacemaker_online_status" class="status-standby">
+                      Pacemaker Maintenance
+                    </div>
+                    {{else}}
+                    <div id="pacemaker_online_status" class="status">
+                      Pacemaker Connected
+                    </div>
+                    {{/if}}
+                    {{else}}
+                    <div id="pacemaker_online_status" class="status">
+                      Pacemaker Connected
+                    </div>
+                    {{/unless}}
+                    {{else}}
+                    {{#if Pcs.nodesController.cur_node.pacemaker_standby}}
+                    <div id="pacemaker_online_status" class="status-standby">
+                      Pacemaker Standby
+                    </div>
+                    {{else}}
+                    {{#if Pcs.nodesController.cur_node.pacemaker_maintenance}}
+                    <div id="pacemaker_online_status" class="status-standby">
+                      Pacemaker Maintenance
+                    </div>
+                    {{else}}
+                    <div id="pacemaker_online_status" class="status-offline">
+                      Pacemaker Not Connected
+                    </div>
+                    {{/if}}
+                    {{/if}}
+                    {{/if}}
                   </td>
                 </tr>
-                {{/each}}
-                {{#unless Pcs.nodesController.cur_node_attr}}
-                <tr><td style="color: gray;">NONE</td><td></td><td></td></tr>
-                {{/unless}}
-                <tr id="new_node_attr_col">
-                  <td><input type="text" name="new_node_attr_key" size="20"></td>
-                  <td><input type="text" name="new_node_attr_value" size="20"></td>
-                  <td><button type="button" onclick="add_node_attr('#new_node_attr_col');" name="add">Add</button></td>
+                <tr>
+                  <td><div style="margin-right: 8px;" class="check sprites"></div></td>
+                  <td>
+                    {{#if Pcs.nodesController.cur_node.corosync}}
+                    <div id="corosync_online_status" class="status">
+                      Corosync Connected
+                    </div>
+                    {{else}}
+                    <div id="corosync_online_status" class="status-offline">
+                      Corosync Not Connected
+                    </div>
+                    {{/if}}
+                  </td>
                 </tr>
+              </table>
+            </div>
+
+            <div id="node_options_buttons">
+              <div id="node_start" class="link">
+                <div class="restart sprites" style="float: left"></div>
+                 Start
+              </div>
+              <div id="node_stop" class="link">
+                <div class="cancel sprites" style="float: left"></div>
+                 Stop
+              </div>
+              <div id="node_restart" class="link">
+                <div class="restart sprites" style="float: left"></div>
+                 Restart
+              </div>
+              <div id="node_unstandby" class="link" {{bind-attr style="Pcs.nodesController.cur_node.unstandby_style"}}>
+                <div class="unstandby sprites" style="float: left"></div>
+                 Unstandby
+              </div>
+              <div id="node_standby" class="link" {{bind-attr style="Pcs.nodesController.cur_node.standby_style"}}>
+                <div class="standby sprites" style="float: left"></div>
+                 Standby
+              </div>
+              {{#if Pcs.nodesController.cur_node.is_in_maintenance}}
+              <div id="node_unmaintenance" class="link" onclick="fade_in_out('#node_unmaintenance');node_unmaintenance(Pcs.nodesController.cur_node.get('name'));">
+                <div class="unstandby sprites" style="float: left"></div>
+                 Unmaintenance
+              </div>
+              {{else}}
+              <div id="node_maintenance" class="link" onclick="fade_in_out('#node_maintenance');node_maintenance(Pcs.nodesController.cur_node.get('name'));">
+                <div class="standby sprites" style="float: left"></div>
+                 Maintenance
+              </div>
+              {{/if}}
+              <div class="configure sprites" style="float: left"></div>
+              <div class="link"><a href="#/fencedevices" onclick="select_menu('FENCE DEVICES');return true;">Configure Fencing</a></div>
+            </div>
 
+            <div id="node_details">
+              {{#unless Pcs.nodesController.cur_node.status_unknown}}
+              <table><tr>
+                <td><div class="reg">Node ID:</div></td>
+                <td><div class="bold">{{Pcs.nodesController.cur_node.node_id}}</div></td>
+                <td><div class="reg"> Uptime:</div></td>
+                <td><div class="bold" id="uptime">{{Pcs.nodesController.cur_node.uptime}}</div></td>
+              </tr>
               </table>
+              {{/unless}}
             </div>
-          </td>
-          </tr>
-        </table>
-       <table style="clear:left;float:left;margin-top:25px;">
-         <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Fence Levels ({{#if Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
-         <tr><td>
-             <div id="fencelevels">
-               <table class="datatable">
-                 <tr><th>Level</th><th>Fence Devices</th><th>Remove</th></tr>
-                 {{#each Pcs.nodesController.cur_node_fence_levels}}
-                 <tr>
-                   <td>{{this.level}}</td>
-                   <td>{{this.devices}}</td>
-                   <td {{bind-attr fence_level="this.level" fence_devices="this.devices"}}  style="text-align:center">
-                     <a onclick="add_remove_fence_level($(this).parent(),true);return false;" href="#" class="remove">X</a>
-                   </td>
-                 </tr>
-                 {{/each}}
-                 {{#unless Pcs.nodesController.cur_node_fence_levels}}
-                 <tr><td style="color: gray;">NONE</td><td></td><td></td></tr>
-                 {{/unless}}
-                 <tr id="new_fence_level_col">
-                   <td><input type="text" name="new_level_level" size="2"></td>
-                   <td><select name="new_level_value">
-                       <option></option>
-                       {{#each Pcs.resourcesController.stonith_resource_list}}
-                       <option {{bind-attr value="this"}}>{{this}}</option>
-                       {{/each }}
-                   </select></td>
-                   <td><button type="button" onclick="add_remove_fence_level($(this).parent());" name="add">Add</button></td>
-                 </tr>
-               </table>
-             </div>
-           </td>
-         </tr>
-       </table>
-     </td>
-   </tr>
-      </div>
-  </div>
-  </td>
-</tr>
+
+            <table style="float:left;margin-top:25px">
+              <tr><td class="datatable_header">Cluster Daemons</td></tr>
+              <tr><td>
+                <div id="clusterdaemons">
+                  <table class="datatable">
+                    <tr><th>NAME</th><th>STATUS</th></tr>
+                    <tr><td>pacemaker</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div>
+                      {{#if Pcs.nodesController.cur_node.pacemaker_daemon}}
+                      <span id="pacemaker_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span>
+                      {{else}}
+                      {{#if Pcs.nodesController.cur_node.pcsd}}
+                      <span id="pacemaker_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span>
+                      {{else}}
+                      <span id="pacemaker_status" style="float:left" class="status-unknown">Unknown ({{Pcs.nodesController.cur_node.pacemaker_startup}})</span>
+                      {{/if}}
+                      {{/if}}
+                    </td></tr>
+                    <tr><td>corosync</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div>
+                      {{#if Pcs.nodesController.cur_node.corosync_daemon}}
+                      <span id="corosync_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.corosync_startup}})</span>
+                      {{else}}
+                      {{#if Pcs.nodesController.cur_node.pcsd}}
+                      <span id="corosync_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.corosync_startup}})</span>
+                      {{else}}
+                      <span id="corosync_status" style="float:left" class="status-unknown">Unknown ({{Pcs.nodesController.cur_node.corosync_startup}})</span>
+                      {{/if}}
+                      {{/if}}
+                    </td></tr>
+                    <tr><td>pcsd</td><td><div style="float:left;margin-right:6px;" class="check sprites"></div>
+                      {{#if Pcs.nodesController.cur_node.pcsd}}
+                      <span id="pcsd_status" style="float:left" class="status">Running ({{Pcs.nodesController.cur_node.pcsd_startup}})</span>
+                      {{else}}
+                      {{#if Pcs.nodesController.cur_node.authorized}}
+                      <span id="pcsd_status" style="float:left" class="status-offline">Stopped ({{Pcs.nodesController.cur_node.pcsd_startup}})</span>
+                      {{else}}
+                      <span id="pcsd_status" style="float:left" class="status-offline">Running (not Authorized) ({{Pcs.nodesController.cur_node.pcsd_startup}})</span>
+                      {{/if}}
+                      {{/if}}
+                    </td></tr>
+                  </table>
+                </div>
+              </td>
+              </tr>
+            </table>
+            <table style="clear:left;float:left;margin-top:25px;">
+              <tr><td class="datatable_header">Running Resources</td></tr>
+              <tr><td>
+                <div id="resources_running">
+                  <table class="datatable">
+                    <tr><th>NAME</th></tr>
+                    {{#if Pcs.nodesController.cur_node.running_resources}}
+                    {{#each res in Pcs.nodesController.cur_node.running_resources}}
+                    <tr><td>
+                      {{#unless res.stonith}}
+                      {{#link-to 'Resources.index' res}}{{res.name}} ({{res.res_type}}){{/link-to}}
+                      {{/unless}}
+                    </td></tr>
+                    {{/each}}
+                    {{else}}
+                    <tr><td style="color: gray;">NONE</td></tr>
+                    {{/if}}
+                  </table>
+                </div>
+              </td>
+              </tr>
+            </table>
+            <table style="clear:left;float:left;margin-top:25px;">
+              <tr><td class="datatable_header">Resource Location Preferences</td></tr>
+              <tr><td>
+                <div id="locationdep">
+                  <table class="datatable">
+                    <tr><th>NAME</th><th>Score</th></tr>
+                    {{#if Pcs.nodesController.cur_node.location_constraints}}
+                    {{#each Pcs.nodesController.cur_node.location_constraints}}
+                    <tr><td>{{rsc}}</td><td>{{score}}</td><td></td></tr>
+                    {{/each}}
+                    {{else}}
+                    <tr><td style="color: gray;">NONE</td><td></td></tr>
+                    {{/if}}
+                  </table>
+                </div>
+              </td>
+              </tr>
+            </table>
+            <table style="clear:left;float:left;margin-top:25px;">
+              <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Node Attributes ({{#if Pcs.nodesController.cur_node_attr.length}}{{Pcs.nodesController.cur_node_attr.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
+              <tr><td>
+                <div id="node_attributes">
+                  <table class="datatable">
+                    <tr><th>Attribute</th><th>Value</th><th>Remove</th></tr>
+                    {{#each attr in Pcs.nodesController.cur_node_attr}}
+                    <tr><td>{{attr.name}}</td>
+                      <td>{{attr.value}}</td>
+                      <td {{bind-attr node_attr_key=attr.name}} {{bind-attr node_attr_parent=attr.parent}} style="text-align:center">
+                        <a onclick="remove_node_attr($(this).parent());return false;" href="#" class="remove">X</a>
+                      </td>
+                    </tr>
+                    {{else}}
+                    <tr><td style="color: gray;">NONE</td><td></td><td></td></tr>
+                    {{/each}}
+                    <tr id="new_node_attr_col">
+                      <td><input type="text" name="new_node_attr_key" size="20"></td>
+                      <td><input type="text" name="new_node_attr_value" size="20"></td>
+                      <td><button type="button" onclick="add_node_attr('#new_node_attr_col');" name="add">Add</button></td>
+                    </tr>
+                  </table>
+                </div>
+              </td>
+              </tr>
+            </table>
+            <table style="clear:left;float:left;margin-top:25px;">
+              <tr><td onclick="show_hide_constraints(this)" class="datatable_header hover-pointer">Fence Levels ({{#if Pcs.nodesController.cur_node_fence_levels.length}}{{Pcs.nodesController.cur_node_fence_levels.length}}{{else}}0{{/if}})<span style="" class="downarrow sprites"></span><span style="display: none;" class="rightarrow sprites"></span></td></tr>
+              <tr><td>
+                <div id="fencelevels">
+                  <table class="datatable">
+                    <tr><th>Level</th><th>Fence Devices</th><th>Remove</th></tr>
+                    {{#each Pcs.nodesController.cur_node_fence_levels}}
+                    <tr>
+                      <td>{{this.level}}</td>
+                      <td>{{this.devices}}</td>
+                      <td {{bind-attr fence_level="this.level" fence_devices="this.devices"}}  style="text-align:center">
+                        <a onclick="add_remove_fence_level($(this).parent(),true);return false;" href="#" class="remove">X</a>
+                      </td>
+                    </tr>
+                    {{/each}}
+                    {{#unless Pcs.nodesController.cur_node_fence_levels}}
+                    <tr><td style="color: gray;">NONE</td><td></td><td></td></tr>
+                    {{/unless}}
+                    <tr id="new_fence_level_col">
+                      <td><input type="text" name="new_level_level" size="2"></td>
+                      <td><select name="new_level_value">
+                        <option></option>
+                        {{#each Pcs.resourcesContainer.fence_list}}
+                        <option {{bind-attr value="this.id"}}>{{this.id}}</option>
+                        {{/each }}
+                      </select></td>
+                      <td><button type="button" onclick="add_remove_fence_level($(this).parent());" name="add">Add</button></td>
+                    </tr>
+                  </table>
+                </div>
+              </td>
+              </tr>
+            </table>
+            {{#if Pcs.nodesController.utilization_support}}
+            <table style="clear:left; float:left; margin-top: 25px;"><tr><td>
+            {{utilization-table entity=Pcs.nodesController.cur_node utilization=Pcs.nodesController.cur_node.utilization}}
+            </td></tr></table>
+            {{/if}}
+    </div>
+    </div>
+    </td>
+    </tr>
 <%= erb :_configure %>
 <%= erb :_acls %>
 <%= erb :_wizards %>
@@ -284,6 +317,7 @@
 <% @myView = "resource" %>
 <%= erb :_resource %>
 </table>
+<%= erb :_dialogs %>
 <div id="add_node" style="display: none;">
   <form method=POST action="/add_node_all">
     <table id="add_node_selector">
@@ -306,18 +340,6 @@
         <td colspan="2" style="color: orange">This is a CMAN cluster with UDPU transport,<br>cluster restart is required to apply node addition.</td>
       </tr>
       {{/if}}
-      <tr>
-        <td colspan=2 style="text-align:center;padding-top:20px;">
-          <input class="create_node" type=submit onclick="$(this).hide();create_node($(this).parents('form')); return false;" value="Add Node">
-        </td>
-      </tr>
     </table>
   </form>
 </div>
-<div id="remove_node" style="display:none;">
-  <p style="font-size:12px;">Are you sure you want to remove the following nodes(s)?</p>
-  <span id="nodes_to_remove"></span>
-  {{#if Pcs.is_cman_with_udpu_transport}}
-    <p style="color: orange">This is a CMAN cluster with UDPU transport, cluster restart is required to apply node removal.</p>
-  {{/if}}
-</div>
diff --git a/pcsd/views/permissions.erb b/pcsd/views/permissions.erb
new file mode 100644
index 0000000..1e38d7e
--- /dev/null
+++ b/pcsd/views/permissions.erb
@@ -0,0 +1,54 @@
+<table id="main">
+  <tr id="cluster_title_row" {{bind-attr style=Pcs.permissions_page}}>
+    <td id="page_header" colspan="2" style="vertical-align:middle;">
+      <table id="permissions_commands">
+        <tr>
+          <td>PERMISSIONS</td>
+        </tr>
+      </table>
+    </td>
+  </tr>
+  <tr id="cluster_header_row" {{bind-attr style=Pcs.permissions_page}}>
+    <td class="borderbottom" colspan="2"> </td>
+  </tr>
+  <tr id="cluster_list_row" {{bind-attr style=Pcs.permissions_page}}>
+    <td id="cluster_list" class="node_list" style="width:auto;">
+      <table cellpadding="0" cellspacing="0" style="width:100%;">
+        <tr>
+          <th> </th>
+          <th> </th>
+          <th>CLUSTER NAME</th>
+          <th style="padding-right: 16px;"> </th>
+        </tr>
+        <% @clusters.each do |c| %>
+          <tr
+            onmouseover="if(permissions_current_cluster != '<%= h(c.name) %>') hover_over(this);"
+            onmouseout="if(permissions_current_cluster != '<%= h(c.name) %>') hover_out(this);"
+            onclick="permissions_show_cluster('<%= h(c.name) %>', this);"
+          >
+            <td class="node_list_sprite">
+              <div class="check sprites"></div>
+            </td>
+            <td style="min-width:1em; padding-right:0.5em;">
+              <span class="unsaved_changes" style="display:none;"
+                title="There are unsaved changes in the form"
+              >(*)</span>
+            </td>
+            <td nowrap class="node_name">
+              <%= h(c.name) %>
+            </td>
+            <td>
+              <div class="arrow sprites" style="display:none;"></div>
+            </td>
+          </tr>
+        <% end %>
+      </table>
+    </td>
+    <td id="node_info">
+      <% @clusters.each { |c| %>
+        <div id="permissions_cluster_<%= h(c.name) %>" style="display:none;">
+        </div>
+      <% } %>
+    </td>
+  </tr>
+</table>
diff --git a/pcsd/views/resourceagentform.erb b/pcsd/views/resourceagentform.erb
index 3399461..039023e 100644
--- a/pcsd/views/resourceagentform.erb
+++ b/pcsd/views/resourceagentform.erb
@@ -4,30 +4,31 @@
       <table>
         <tr>
           <td><div class="bold">Description:</div></td>
-	  <td><span class="reg" style="float:left;"><%=h(@resource.short_desc)%> </span> <span title="<%=h(@resource.long_desc)%>" onclick="$(this).closest('table').find('.long_desc_div').toggle();" class="infoicon sprites" style="margin-top:2px;"></span></td>
+	  <td><span class="reg" style="float:left;"><%=h(@resource.short_desc)%> </span> <span title="<%=nl2br(h(@resource.long_desc))%>" onclick="$(this).closest('table').find('.long_desc_div').toggle();" class="infoicon sprites" style="margin-top:2px;"></span></td>
         </tr>
         <tr>
           <td></td>
           <td><div class="long_desc_div reg" style="display:none; font-size:12px; max-width:350px;"><%= nl2br(h(@resource.long_desc))%></div></td>
         </tr>
+	<% if @version != '2' %>
         <tr title="<%= h('Select a group to add the resource to.') %>">
 	  <td nowrap><div class="bold">Resource Group:</div></td>
 	  <td>
 	    <select name="resource_group">
 		<option value="">None</option>
 	      <% @groups.each do |g| %>
-		<option <%= "selected" if @cur_resource and g == @cur_resource.group %> value="<%=g%>"><%=g%></option>
+		<option <%= "selected" if g == @cur_resource_group %> value="<%=g%>"><%=g%></option>
 	      <% end %>
 	    </select>
-	    <input type=hidden name="_orig_resource_group" value="<%= @cur_resource.group if @cur_resource %>">
+	    <input type=hidden name="_orig_resource_group" value="<%= @cur_resource_group if @cur_resource_group %>">
 	  </td>
 	</tr>
 	<tr title='<%= h("Makes the resource run multiple times on the cluster. \
 By default the resource will run once on each of the nodes.") %>'>
 	  <td nowrap><div class="bold">Clone:</div></td>
 	  <td>
-	    <input type="hidden" name="_orig_resource_clone" value="<%= @cur_resource.clone if @cur_resource %>">
-	    <input type=checkbox name="resource_clone" <%= "checked" if @cur_resource && @cur_resource.clone %>>
+	    <input type="hidden" name="_orig_resource_clone" value="<%= @cur_resource_clone if @cur_resource %>">
+	    <input type=checkbox name="resource_clone" <%= "checked" if @cur_resource && @cur_resource_clone %>>
 	  </td>
 	</tr>
 	<tr title='<%= h("Makes the resource run multiple times on the cluster and \
@@ -36,10 +37,11 @@ By default the resource will run on one node in Master mode and on all other \
 nodes in Slave mode.") %>'>
 	  <td nowrap><div class="bold">Master/Slave:</div></td>
 	  <td>
-	    <input type="hidden" name="_orig_resource_ms" value="<%= @cur_resource.ms if @cur_resource %>">
-	    <input type=checkbox name="resource_ms" <%= "checked" if @cur_resource && @cur_resource.ms %>>
+	    <input type="hidden" name="_orig_resource_ms" value="<%= @cur_resource_ms if @cur_resource %>">
+	    <input type=checkbox name="resource_ms" <%= "checked" if @cur_resource && @cur_resource_ms %>>
 	  </td>
 	</tr>
+	<% end %>
 	<% if not @existing_resource %>
 	  <tr title="<%= h('Do not start the resource automatically after creating.') %>">
 	    <td nowrap><div class="bold">Disabled:</div></td>
@@ -73,7 +75,7 @@ nodes in Slave mode.") %>'>
 	    <%= name %>
 	  </td>
 	  <td>
-	    <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.options[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=@cur_resource.options[name] if @existing_resource %>" size="35" class="text_field">
+	    <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.instance_attr[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=h(@cur_resource.instance_attr[name].value) if @existing_resource && @cur_resource && @cur_resource.instance_attr[name] %>" size="35" class="text_field">
 	  </td>
 	</tr>
       <% } %>
@@ -89,7 +91,7 @@ nodes in Slave mode.") %>'>
 	    <%= name %>
 	  </td>
 	  <td>
-	    <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.options[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=@cur_resource.options[name] if @existing_resource %>" size="35" class="text_field">
+	    <input placeholder="<%=desc[0]%>" style="margin-right: 50px;" type="text" name="<%= @cur_resource && @cur_resource.instance_attr[name] ? "_res_paramne_" : "_res_paramempty_"%><%=name%>" value="<%=h(@cur_resource.instance_attr[name].value) if @existing_resource && @cur_resource && @cur_resource.instance_attr[name] %>" size="35" class="text_field">
 	  </td>
 	</tr>
       <% } %>
diff --git a/pcsd/views/resourcedeps.erb b/pcsd/views/resourcedeps.erb
deleted file mode 100644
index 3973a0f..0000000
--- a/pcsd/views/resourcedeps.erb
+++ /dev/null
@@ -1,83 +0,0 @@
-  <table id="main" width=930>
-    <tr><td id="page_header" colspan=3>
-	RESOURCE DEPENDENCIES
-      </td>
-    </tr>
-    <tr>
-      <td id="remove_add" class="borderbottom">
-	<div class="x sprites"><a href="#" class="x sprites"></a></div><div class=link"> Remove    </div>
-	<div class="plus sprites"></div><div class="link"> Add    </div>
-	<div class="plus sprites"></div><div class="link"> Create Group</div>
-      </td>
-      <td class="borderbottom"> </td>
-      <td id="tab_name" width="135">
-	<div style="float: left"> Edit Dependencies </div><div class="check sprites"></div>
-      </td>
-    </tr>
-    <tr>
-      <td id="node_list">
-	<table cellpadding=0 cellspacing=0 style="float:left;">
-	  <% @nodes.each  do |i,node| %>
-	    <% if i != @cur_node %>
-	      <tr onmouseover="$(this).css('background-color', 'e4f5fd');$(this).find('td').last().css('display','');" onmouseout="$(this).css('background-color','ffffff');$(this).find('td').last().css('display','none');" onclick="window.location='/nodes/<%=i%>'">
-	      <% else %>
-		<tr style="background-color: #e4f5fd">
-	      <% end %>
-	    <td>
-	      <input class="node_list_check" type="checkbox">
-	    </td>
-	    <td nowrap style="cursor:default;padding-right:20px;">
-	      <%= node.name %>
-	    </td>
-	    <td class="resource_type">
-	      <%= node.hostname %>
-	    </td>
-	    <td nowrap><div style="margin-right:5px;" class="previewmap sprites"></div>
-	      Preview Map
-	    </td>
-	    <% if i != @cur_node %>
-	      <td style="display:none">
-	      <% else %>
-	      <td>
-	      <% end %>
-	      <div class="arrow sprites"></div>
-	    </td>
-	  </tr>
-	<% end %>
-	</table>
-      </td>
-      <td id="node_info" colspan=2>
-	<div id="test">
-	  <div id="node_info_header">
-	    <div id="node_info_header_title">Edit</div>
-	    <div id="node_info_header_title_name"> <%=@cur_node.name%></div>
-	  </div>
-
-	  <div id="node_sub_info">
-	    <table>
-	      <tr>
-		<td><input style="margin-right: 50px;" type="text" value="<%=@cur_node.name%>" size="35" class="text_field"></td>
-	      </tr>
-	    </table>
-	  </div>
-
-	  <div id="node_details">
-	    <table><tr>
-		<td nowrap><div class="bold resource_name">THIS RESOURCE</div></td>
-	      </tr>
-	      <tr>
-		<td><div class="reg"><%=@cur_node.hostname%></div></td>
-	      </tr>
-	    </table>
-	  </div>
-
-	  <table>
-	    <tr><td class="center" style="padding-top:20px;" colspan=2>
-		<input type=submit class="text_field" value="Apply Changes">
-	      </td>
-	    </tr>
-	  </table>
-      </td>
-    </tr>
-  </div>
-</table>
diff --git a/pcsd/wizards/apache.rb b/pcsd/wizards/apache.rb
index 81475c1..738520d 100644
--- a/pcsd/wizards/apache.rb
+++ b/pcsd/wizards/apache.rb
@@ -29,11 +29,11 @@ class ApacheWizard < PCSDWizard
 
     puts "PCS NAME"
     puts PCS
-    puts run_cmd(PCS, 'resource','create','shared_dev', 'LVM', 'volgrpname='+vg)
-    puts run_cmd(PCS, 'resource','create','shared_fs', 'Filesystem', 'device='+device, 'directory=/var/www/html', 'fstype="ext4"', 'options="ro"')
-    puts run_cmd(PCS, 'resource','create','Apache','apache', 'configfile="/etc/httpd/conf/httpd.conf"', 'statusurl="http://127.0.0.1/server-status"')
-    puts run_cmd(PCS, 'resource','create','ClusterIP','IPaddr2',"ip="+ip, "cidr_netmask="+nm)
-    puts run_cmd(PCS, 'resource','group','add','ApacheGroup','shared_dev','shared_fs','ClusterIP','Apache')
+    puts run_cmd(session, PCS, 'resource','create','shared_dev', 'LVM', 'volgrpname='+vg)
+    puts run_cmd(session, PCS, 'resource','create','shared_fs', 'Filesystem', 'device='+device, 'directory=/var/www/html', 'fstype="ext4"', 'options="ro"')
+    puts run_cmd(session, PCS, 'resource','create','Apache','apache', 'configfile="/etc/httpd/conf/httpd.conf"', 'statusurl="http://127.0.0.1/server-status"')
+    puts run_cmd(session, PCS, 'resource','create','ClusterIP','IPaddr2',"ip="+ip, "cidr_netmask="+nm)
+    puts run_cmd(session, PCS, 'resource','group','add','ApacheGroup','shared_dev','shared_fs','ClusterIP','Apache')
     out = "Resources created..."
     return out.gsub(/\n/,"<br>")
   end
diff --git a/setup.py b/setup.py
index 6a6cb14..c76d2b2 100644
--- a/setup.py
+++ b/setup.py
@@ -3,12 +3,12 @@
 from distutils.core import setup
 
 setup(name='pcs',
-    version='0.9.139',
+    version='0.9.148',
     description='Pacemaker Configuration System',
     author='Chris Feist',
     author_email='cfeist at redhat.com',
     url='http://github.com/feist/pcs',
     packages=['pcs'],
-    package_data={'pcs':['corosync.conf.template','corosync.conf.fedora.template','bash_completion.d.pcs','pcs.8']}, 
+    package_data={'pcs':['bash_completion.d.pcs','pcs.8']},
     py_modules=['pcs']
     )

-- 
Alioth's /usr/local/bin/git-commit-notice on /srv/git.debian.org/git/debian-ha/pcs.git


